from typing import Dict
from einops import rearrange
import torch
import torch.nn as nn
from vector_quantize_pytorch.random_projection_quantizer import RandomProjectionQuantizer
import torch.nn.functional as F




class SimpleStackProjector6x(nn.Module):
    """Stack 6 input frames per output step.

    Follows the sampling geometry of two stacked 1-D convolutions
    (kernel=3, stride=2; then kernel=5, stride=3) to pick time indices, and
    concatenates 6 frames per output step, so the output feature dimension is
    6 * input_dim (480 when input_dim == 80).
    """

    def __init__(self, input_dim: int = 80):
        super().__init__()
        self.input_dim = input_dim

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Extract and stack frames aligned with the two-conv sampling points.

        Args:
            x: (B, T, input_dim) feature tensor.

        Returns:
            (B, T_out, 6 * input_dim) stacked features, where T_out equals the
            output length of conv(k=3, s=2) followed by conv(k=5, s=3); an
            empty (B, 0, 6 * input_dim) tensor when T is too short.
        """
        # Name the feature dim `feat_dim`, not `F`: the original shadowed the
        # module-level `torch.nn.functional as F` import.
        B, T, feat_dim = x.size()
        assert feat_dim == self.input_dim, f"Expect input_dim={self.input_dim}, got {feat_dim}"

        # ---- Step 1: emulate the output-length arithmetic of the two conv layers.
        len_after_conv1 = (T - 3) // 2 + 1
        max_len = (len_after_conv1 - 5) // 3 + 1
        if max_len <= 0:
            # Input too short to yield a single output step.
            return torch.zeros(B, 0, self.input_dim * 6, device=x.device, dtype=x.dtype)

        # ---- Step 2: build time indices matching the conv sampling points.
        # First layer: windows of 3 frames with stride 2.
        idx1 = torch.arange(T, device=x.device).unfold(0, 3, 2)  # (len_after_conv1, 3)

        # Second layer: windows of 5 conv1 outputs with stride 3.
        idx2 = torch.arange(idx1.size(0), device=x.device).unfold(0, 5, 3)  # (max_len, 5)

        # Combine: each output step covers 5 * 3 = 15 (overlapping) input frames;
        # overlap duplicates frames, so keep the first 6 equivalent frames.
        idx = idx1[idx2].reshape(idx2.size(0), -1)  # (max_len, 15)
        idx = idx[:, :6]  # (max_len, 6)

        # ---- Step 3: gather the 6 frames per step and flatten them.
        x_stacked = x[:, idx, :]  # (B, max_len, 6, feat_dim)
        return x_stacked.reshape(B, max_len, -1)

import math

def _cos(x: float) -> float:
    # 0→1 的余弦升温
    x = min(1.0, max(0.0, x))
    return 0.5 * (1.0 - math.cos(math.pi * x))

def mask_schedule(step: int, total_steps: int):
    """Three-phase schedule for BERT-style span masking.

    Phases (T = total_steps):
        A  [0, 0.3T]:    p ramps 0.03 -> 0.06 (cosine), L fixed at 5.
        B  (0.3T, 0.6T]: p fixed at 0.06, L ramps 5 -> 10 (cosine).
        C  (0.6T, T]:    L fixed at 10, p ramps 0.06 -> 0.09 (cosine).

    Args:
        step: current training step (values < 0 are clamped to 0).
        total_steps: schedule length T (values < 1 are clamped to 1).

    Returns:
        (p, L): span-start probability p, a float clamped to [0.0, 0.99],
        and span length L, an int >= 1.
    """

    def _ramp(x: float) -> float:
        # Cosine warm-up from 0 to 1; input clamped to [0, 1].
        x = min(1.0, max(0.0, x))
        return 0.5 * (1.0 - math.cos(math.pi * x))

    T = max(1, total_steps)
    s = max(0, step)

    # Phase boundaries.
    a, b = int(0.3 * T), int(0.6 * T)
    # Endpoint targets for p and L.
    p0, p_mid, p_final = 0.03, 0.06, 0.09
    L_lo, L_hi = 5, 10

    if s <= a:
        # Phase A: ramp p only.  Guard a == 0 (tiny total_steps): treat the
        # degenerate phase as already complete instead of dividing by zero.
        alpha = _ramp(s / a) if a > 0 else 1.0
        p = p0 + (p_mid - p0) * alpha
        L = L_lo
    elif s <= b:
        # Phase B: hold p at its mid value, ramp L.
        # (b > a whenever a > 0; if a == 0 then s >= 1 here and b >= 1.)
        beta = _ramp((s - a) / (b - a))
        p = p_mid
        L = int(round(L_lo + (L_hi - L_lo) * beta))
    else:
        # Phase C: hold L at its max, ramp p to its final value.
        # T - b >= 1 always, since b = int(0.6 * T) < T.
        gamma = _ramp((s - b) / (T - b))
        p = p_mid + (p_final - p_mid) * gamma
        L = L_hi

    # Safety clamps.
    p = float(min(0.99, max(0.0, p)))
    L = max(1, int(L))
    return p, L

def expected_coverage(p: float, L: int) -> float:
    """Expected fraction of positions covered when each of L independent
    chances starts a span with probability p (1 minus the all-miss prob)."""
    miss_all = (1.0 - p) ** L
    return 1.0 - miss_all


def get_bert_mask(seq_mask, step_now=None):
    """
    Select BERT-style mask spans inside the valid region of each sequence.

    Args:
        seq_mask: (B, T) bool; True for the first N valid positions, False for
            padding.
        step_now: optional training step used to look up (prob, span_len) from
            mask_schedule (total_steps hard-coded to 50000); when None, fixed
            prob=0.08 and span_len=7 are used.

    Returns:
        bert_mask:   (B, T) bool; seq_mask with the chosen positions set False.
        choice_mask: (B, T) bool; which positions were selected for masking.
        actual_cov:  scalar tensor, fraction of valid positions actually
            masked.  NOTE(review): evaluates to NaN when seq_mask is all
            False (0 / 0) — confirm callers never pass an empty mask.
        prob:        the span-start probability that was used.
        span_len:    the span length that was used.
    """
    if step_now is None:
        prob,span_len  = 0.08, 7
    else:
        prob, span_len = mask_schedule(step_now, 50000)
    B, T = seq_mask.shape
    device = seq_mask.device

    # ---- Step 1: draw random span starts (independent Bernoulli per position).
    start_mask = (torch.rand((B, T), device=device) < prob) & seq_mask

    # ---- Step 2: extend each start into a span of span_len positions.
    choice_mask = torch.zeros((B, T), dtype=torch.bool, device=device)
    for shift in range(span_len):
        shifted = torch.roll(start_mask, shifts=shift, dims=1)
        # Zero out part of the rolled tensor so spans do not cross into the
        # invalid region.  NOTE(review): torch.roll wraps around, and the
        # wrapped entries land at indices [0, shift), yet this guard zeros the
        # tail [T - shift, T) instead — starts near the end of a row may leak
        # to its head.  Verify this is the intended behavior.
        shifted &= torch.cat(
            [torch.ones((B, T - shift), dtype=torch.bool, device=device),
             torch.zeros((B, shift), dtype=torch.bool, device=device)], dim=1
        )
        choice_mask |= shifted

    # ---- Step 3: restrict selections to valid (non-padding) positions.
    choice_mask &= seq_mask

    # ---- Step 4: guarantee at least one span is selected per sequence.
    for b in range(B):
        if choice_mask[b].sum() == 0 and seq_mask[b].any():
            valid_idx = torch.nonzero(seq_mask[b], as_tuple=False).squeeze(1)
            if len(valid_idx) > 0:
                start = valid_idx[torch.randint(0, len(valid_idx), (1,))]
                end = min(start + span_len, T)
                choice_mask[b, start:end] = True

    # ---- Step 5: guarantee every item keeps at least one unmasked valid position.
    for b in range(B):
        # Valid positions that are currently NOT masked.
        non_mask_idx = torch.nonzero(seq_mask[b] & ~choice_mask[b], as_tuple=False).squeeze(1)

        if len(non_mask_idx) > 0:
            # At least one unmasked valid position already exists; nothing to do.
            pass
        else:
            # Need >= 2 valid positions so some can stay masked and some not.
            valid_idx = torch.nonzero(seq_mask[b], as_tuple=False).squeeze(1)
            assert len(valid_idx) >= 2, f"Seq_mask must have at least 2 valid positions for batch {b}"

            # Randomly pick half of the valid positions to un-mask.
            half_valid_idx = valid_idx[torch.randperm(len(valid_idx))[:len(valid_idx) // 2]]

            # Flip that half back to unmasked.
            choice_mask[b, half_valid_idx] = False

    # ---- Step 6: bert_mask = valid AND not chosen for masking.
    bert_mask = seq_mask & ~choice_mask
    actual_cov = choice_mask.sum() / seq_mask.sum()
    return bert_mask, choice_mask, actual_cov, prob, span_len


def make_seq_mask(feat_lengths, max_len=None):
    """
    Build a boolean validity mask from per-sequence lengths.

    Args:
        feat_lengths: (batch,) number of valid frames per sequence.
        max_len: optional padded length; defaults to feat_lengths.max().

    Returns:
        (batch, max_len) bool tensor; True = valid frame, False = padding.
    """
    if max_len is None:
        max_len = feat_lengths.max().item()

    # Positions 0..max_len-1, broadcast (1, max_len) against (batch, 1):
    # a frame is valid while its position index is below the sequence length.
    positions = torch.arange(max_len, device=feat_lengths.device)
    return positions.unsqueeze(0) < feat_lengths.unsqueeze(1)



class BestRQ(nn.Module):
    """BEST-RQ style self-supervised pre-training module.

    A frozen random-projection quantizer (the "teacher") converts input
    features into discrete codebook labels; the trainable encoder must
    predict those labels at masked positions via cross-entropy.
    """

    def __init__(
        self,
        encoder: torch.nn.Module,
        codebook_size: int=1024,
        codebook_dim: int=16,
    ):
        super().__init__()

        # Label teacher: stack 6 x 80-dim frames -> 480-dim, LayerNorm, then a
        # random-projection quantizer that emits codebook indices.
        self.rpq = nn.Sequential(
            SimpleStackProjector6x(80),
            nn.LayerNorm(480, elementwise_affine = True),
            RandomProjectionQuantizer(
                dim = 480,
                codebook_size = codebook_size,
                codebook_dim = codebook_dim,
                norm = False
            )
        )
        # self.rpq.requires_grad = False
        # Freeze the teacher: a fixed random projection, never trained.
        self.rpq.requires_grad_(False)
        self.encoder = encoder
        # NOTE(review): assumes the encoder outputs 384-dim features —
        # confirm against the encoder configuration.
        self.embed_dim = 384

        # Label id ignored by F.cross_entropy at unmasked positions.
        self.pad_id = -100

        # Projection from encoder features to codebook logits.
        self.to_logits = nn.Sequential(
            nn.LayerNorm(self.embed_dim),
            nn.Linear(self.embed_dim, codebook_size)
        )
        # self.mask_prob = 0.80


    def forward(
        self,
        batch: Dict,
        device: torch.device,
        # x,
        # x_lengths,
    ):
        """
        Compute the masked-prediction loss plus diagnostic statistics.

        batch['feats']:         (batch, time, feat_dim) input features
        batch['feats_lengths']: (batch,) valid frame counts
        batch.get('batch_idx'): optional step index driving the mask schedule

        NOTE(review): the original docstring described the input as "wav
        samples, x: (batch, time)", but the code consumes frame-level
        features (80-dim, per the SimpleStackProjector6x teacher) — confirm
        the intended input.

        Returns a dict with the loss, masked-prediction accuracy, diversity
        statistics over predictions and teacher labels, and the mask
        schedule values actually used.
        """
        x = batch['feats'].to(device)
        x_lengths = batch['feats_lengths'].to(device)
        labels = self.rpq(x)
        # print(f'labels shape: {labels.shape}')

        feat_seq_mask = make_seq_mask(x_lengths)
        # Downsample the frame-level mask to the label/encoder frame rate.
        # NOTE(review): offset 10 / stride 6 presumably mirrors the teacher's
        # 6x subsampling geometry — confirm it matches labels.shape[1].
        feat_seq_mask = feat_seq_mask[:, 10::6]
        batch_idx = batch.get('batch_idx', None)

        # mask_with_bert_mask, dropped_mask = get_mask_subset_prob(feat_seq_mask)
        # dropped_mask marks positions masked for prediction; mask_with_bert_mask
        # is the remaining visible valid region passed to the encoder.
        mask_with_bert_mask, dropped_mask,actual_cov, prob, span_len = get_bert_mask(feat_seq_mask, step_now=batch_idx)

        features, mask  = self.encoder(
            x,
            x_lengths,
            mask_with_bert_mask,
        )
        logits = self.to_logits(features)
        # cross_entropy expects (batch, classes, positions).
        logits = rearrange(logits, "b n c -> b c n")

        # Supervise only the masked positions; everything else becomes pad_id.
        masked_labels = labels.masked_fill(~dropped_mask, self.pad_id)
        loss = F.cross_entropy(logits, masked_labels, ignore_index=self.pad_id)

        # ===== Added: prediction accuracy, valid-label count, unique-prediction count =====
        # with torch.no_grad():
        #     preds = logits.argmax(dim=1)  # (b, n)
        #     valid_mask = masked_labels != self.pad_id  # positions actually masked
        #
        #     # accuracy
        #     correct = (preds == masked_labels) & valid_mask
        #     if valid_mask.sum() > 0:
        #         acc = correct.sum().float() / valid_mask.sum().float()
        #     else:
        #         acc = torch.tensor(0.0, device=logits.device)
        #
        #     # total number of supervised label positions
        #     total_valid = valid_mask.sum().item()
        #
        #     # number of distinct predicted labels
        #     # print(f'predictions before dedup: {preds[valid_mask]}')
        #     unique_pred_count = preds[valid_mask].unique().numel()
        #     # print(f'after dedup: {preds[valid_mask].unique()}')
        #     prediction_diversity_ratio = unique_pred_count / total_valid if total_valid > 0 else 0.0

        with torch.no_grad():
            # logits is (B, C, N) after the rearrange above; if it were
            # (B, N, C), apply logits = logits.transpose(1, 2) first.
            preds = logits.argmax(dim=1)  # (B, N) predicted class at each masked position
            valid_mask = (masked_labels != self.pad_id)  # True = masked, supervised position

            total_valid = int(valid_mask.sum())
            if total_valid == 0:
                # No supervised positions: report zeros for every statistic.
                acc = torch.tensor(0.0, device=logits.device)
                unique_pred_count = unique_label_count = 0
                prediction_diversity_ratio = label_diversity_ratio = 0.0
                pred_singleton_count = label_singleton_count = 0
                pred_singleton_ratio = label_singleton_ratio = 0.0
                pred_entropy = label_entropy = 0.0
            else:
                # === accuracy over the supervised positions only
                acc = ((preds == masked_labels) & valid_mask).float().sum() / valid_mask.float().sum()

                # === flatten the supervised positions
                pred_valid = preds[valid_mask].contiguous().view(-1)  # (M,)
                label_valid = masked_labels[valid_mask].contiguous().view(-1)  # (M,)
                num_classes = logits.size(1)

                # === statistics helper: diversity ratio, singleton rate, entropy
                def stats_1d(x: torch.Tensor, K: int):
                    # x: int64 indices on device
                    counts = torch.bincount(x, minlength=K)  # (K,)
                    used = counts > 0
                    unique_count = int(used.sum())
                    diversity_ratio = unique_count / x.numel()
                    singleton = (counts == 1).sum().item()  # classes occurring exactly once
                    singleton_ratio = singleton / x.numel()
                    p = counts[counts > 0].float() / float(x.numel())
                    entropy = float(-(p * p.log()).sum())  # nats; divide by math.log(2) for bits
                    return unique_count, diversity_ratio, singleton, singleton_ratio, entropy

                # Diversity of the model's predictions.
                (unique_pred_count,
                 prediction_diversity_ratio,
                 pred_singleton_count,
                 pred_singleton_ratio,
                 pred_entropy) = stats_1d(pred_valid, num_classes)

                # Diversity of the teacher labels.
                (unique_label_count,
                 label_diversity_ratio,
                 label_singleton_count,
                 label_singleton_ratio,
                 label_entropy) = stats_1d(label_valid, num_classes)

        # Return everything as a dict.
        return {
            "loss": loss,
            "acc": acc,
            "total_valid": total_valid,
            # prediction diversity
            "pred_unique_count": unique_pred_count,
            "pred_diversity_ratio": prediction_diversity_ratio,  # unique / M
            "pred_singleton_count": pred_singleton_count,  # classes occurring exactly once
            "pred_singleton_ratio": pred_singleton_ratio,  # singleton / M
            "pred_entropy_nats": pred_entropy,

            # teacher-label diversity
            "label_unique_count": unique_label_count,
            "label_diversity_ratio": label_diversity_ratio,
            "label_singleton_count": label_singleton_count,
            "label_singleton_ratio": label_singleton_ratio,
            "label_entropy_nats": label_entropy,

            "actual_cov": actual_cov,
            "prob": prob,
            "span_len": span_len,
        }




