import torch
import sentencepiece as spm
from pathlib import Path
from dataloader import Emilia, DynamicBatchSampler, collate_fn
from torch.utils.data import DataLoader
from train import init_model
import os
import numpy as np

# Character-level "WER" (effectively CER) computation.
# Supports Chinese and English by aligning character-by-character (spaces removed).

def wer(ref, hyp):
    """Return the character error rate of ``hyp`` against ``ref``.

    Both strings are stripped and have spaces removed, then compared
    character by character with Levenshtein edit distance.  The distance
    is normalized by ``len(ref)`` (clamped to at least 1 so an empty
    reference does not divide by zero).

    Args:
        ref: reference transcript (string).
        hyp: hypothesis transcript (string).

    Returns:
        float error rate in [0, inf); 0 means a perfect match.
    """
    ref = list(ref.strip().replace(' ', ''))
    hyp = list(hyp.strip().replace(' ', ''))
    # int32, not uint8: edit distances exceed 255 for long utterances,
    # and uint8 would silently wrap around and corrupt the result.
    d = np.zeros((len(ref) + 1, len(hyp) + 1), dtype=np.int32)
    for i in range(len(ref) + 1):
        d[i][0] = i
    for j in range(len(hyp) + 1):
        d[0][j] = j
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            if ref[i - 1] == hyp[j - 1]:
                d[i][j] = d[i - 1][j - 1]
            else:
                d[i][j] = min(d[i - 1][j], d[i][j - 1], d[i - 1][j - 1]) + 1
    return d[len(ref)][len(hyp)] / max(1, len(ref))

# Autoregressive decoding (greedy when temperature == 0, sampling otherwise).
@torch.no_grad()
def greedy_decode(model, audio, lengths, tokenizer, max_len=100, temperature=0.8):
    """Autoregressively decode token ids for a batch of audio.

    Uses temperature sampling when ``temperature > 0`` and pure greedy
    argmax when ``temperature == 0``.  A simple repetition detector
    force-finishes sequences that emit the same token many times in a row.

    Args:
        model: callable as ``model(audio, lengths, ys)`` returning logits of
            shape (batch, seq, vocab) or (batch, vocab).
        audio: input features, first dim is batch — TODO confirm exact shape.
        lengths: per-sample input lengths.
        tokenizer: SentencePiece-like object with ``pad_id``/``bos_id``/``eos_id``.
        max_len: maximum number of decoding steps.
        temperature: softmax temperature; 0 selects greedy decoding.

    Returns:
        LongTensor (batch, <=max_len) of generated ids, BOS stripped.
    """
    device = audio.device
    model.eval()
    batch_size = audio.size(0)
    pad_id = tokenizer.pad_id()
    bos_id = tokenizer.bos_id() if hasattr(tokenizer, 'bos_id') else 2
    eos_id = tokenizer.eos_id() if hasattr(tokenizer, 'eos_id') else 3

    # Start every sequence with BOS.
    ys = torch.full((batch_size, 1), bos_id, dtype=torch.long, device=device)
    finished = torch.zeros(batch_size, dtype=torch.bool, device=device)

    # Repetition-detection state: the 5 most recent tokens per sequence and
    # a counter of consecutive "all 5 identical to the new token" events.
    last_tokens = torch.full((batch_size, 5), pad_id, dtype=torch.long, device=device)
    repeat_count = torch.zeros(batch_size, dtype=torch.long, device=device)

    for step in range(max_len):
        logits = model(audio, lengths, ys)

        # Normalize to (batch, vocab): take the last time step of 3-D logits.
        current_logits = logits[:, -1, :] if logits.dim() == 3 else logits

        # BUGFIX: only apply temperature when sampling.  The old code divided
        # unconditionally, so temperature == 0 (greedy) divided by zero.
        if temperature > 0:
            current_logits = current_logits / temperature

        # Finished sequences can only emit EOS.
        current_logits[finished, :] = float('-inf')
        current_logits[finished, eos_id] = 0

        if temperature > 0:
            probs = torch.softmax(current_logits, dim=-1)
            next_token = torch.multinomial(probs, 1).squeeze(-1)
        else:
            next_token = current_logits.argmax(-1)

        # Repetition check: the new token matches all 5 previous tokens.
        if step >= 5:
            is_repeating = (last_tokens == next_token.unsqueeze(1)).all(dim=1)
            # BUGFIX: reset the counter when a sequence stops repeating, so
            # scattered repeats no longer accumulate into a forced stop.
            repeat_count = torch.where(
                is_repeating, repeat_count + 1, torch.zeros_like(repeat_count)
            )
            finished = finished | (repeat_count >= 3)

        ys = torch.cat([ys, next_token.unsqueeze(1)], dim=1)
        finished = finished | (next_token == eos_id)

        # Slide the recent-token window.
        last_tokens = torch.cat([last_tokens[:, 1:], next_token.unsqueeze(1)], dim=1)

        if finished.all():
            break

    # Strip the leading BOS column.
    return ys[:, 1:]

if __name__ == "__main__":
    # Load the SentencePiece tokenizer.
    model_file = "./tokenizer/chinese.model"
    chinese_sp = spm.SentencePieceProcessor(model_file=model_file)
    vocab_size = chinese_sp.vocab_size()
    enc_dim = 256
    num_enc_layers = 12
    num_dec_layers = 6
    dropout = 0.1
    model = init_model(vocab_size, enc_dim, num_enc_layers, num_dec_layers, dropout)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)

    # Load the best checkpoint; abort if it is missing.
    ckpt_path = "./ckpts/best_model.pt"
    if os.path.exists(ckpt_path):
        checkpoint = torch.load(ckpt_path, map_location=device)
        model.load_state_dict(checkpoint['model_state_dict'])
        print(f"Loaded checkpoint: {ckpt_path}")
    else:
        print(f"Checkpoint not found: {ckpt_path}")
        exit(1)

    # Build the test split and its loader (deterministic split via fixed seed).
    data_dir = Path("./data/Emilia-YODAS/ZH")
    sample_rate = 16000
    batch_time = 24
    train_files, val_files, test_files = Emilia.split_dataset(data_dir, train_ratio=0.8, val_ratio=0.1, test_ratio=0.1, seed=42)
    test_set = Emilia(data_dir=data_dir, sample_rate=sample_rate, files=test_files)
    test_loader = DataLoader(
        test_set,
        batch_sampler=DynamicBatchSampler(test_set, max_frames=batch_time * sample_rate, shuffle=False),
        num_workers=2,
        collate_fn=collate_fn,
    )

    # Inference and evaluation.
    model.eval()
    total_wer = 0
    count = 0
    for batch in test_loader:
        audio = batch["audio"].to(device)
        lengths = batch["lengths"].to(device)
        texts = batch["text"]
        # Decode the whole batch.
        pred_ids = greedy_decode(model, audio, lengths, chinese_sp, max_len=100, temperature=0.7)
        for i in range(audio.size(0)):
            # Reference transcript.
            ref = texts[i]
            # Predicted token ids.
            pred = pred_ids[i].cpu().numpy().tolist()
            # Truncate at the first EOS, if any.
            if chinese_sp.eos_id() in pred:
                pred = pred[:pred.index(chinese_sp.eos_id())]

            # Drop special tokens (pad / bos) before detokenizing.
            filtered_pred = [x for x in pred if x != chinese_sp.pad_id() and x != chinese_sp.bos_id()]

            # Detokenize to text.
            hyp = chinese_sp.decode_ids(filtered_pred)

            # Post-process: strip surrounding whitespace.
            hyp = hyp.strip()

            # Accumulate WER.
            wer_score = wer(ref, hyp)
            total_wer += wer_score
            count += 1
            print(f"原文: {ref}")
            print(f"预测: {hyp}")
            print(f"WER: {wer_score:.3f}\n")
    # Guard against an empty test set to avoid ZeroDivisionError.
    if count > 0:
        print(f"平均WER: {total_wer/count:.3f}")
    else:
        print("No test samples were evaluated.")
