# utils/inference.py
import torch
import torch.nn as nn
from model.model import Transformer
from config import Config
from utils.data_loader import get_data_loaders
from utils.bleu4 import calculate_bleu4
from tqdm import tqdm

def create_mask(seq, pad_idx):
    """Build a padding attention mask for *seq*.

    Positions holding a real token (!= pad_idx) are True; padding positions
    are False. A dimension is inserted second-to-last so the result
    broadcasts over attention rows: [..., seq_len] -> [..., 1, seq_len].
    """
    keep = seq.ne(pad_idx)
    return keep.unsqueeze(-2)

def beam_search_decode(model, src, src_mask, max_len, start_symbol, end_symbol, beam_width=5, device='cuda'):
    """Autoregressively decode with beam search.

    Args:
        model: seq2seq model exposing .encode(src, src_mask),
            .decode(tgt, memory, tgt_mask, src_mask) and .fc_out (projection
            to vocab logits).
        src: source token ids, shape [batch_size, src_len].
        src_mask: source padding mask (passed through to encode/decode).
        max_len: maximum length of the generated sequence (incl. start token).
        start_symbol: id of the <sos> token used to seed every beam.
        end_symbol: id of the <eos> token; decoding stops early once every
            kept beam contains it.
        beam_width: number of hypotheses kept per batch element.
        device: device for the bookkeeping tensors.

    Returns:
        Tensor of shape [batch_size, seq_len] holding, per batch element,
        the hypothesis with the lowest summed negative log-likelihood.
    """
    model.eval()
    with torch.no_grad():  # pure inference: no need to track gradients
        memory = model.encode(src, src_mask)

        batch_size = src.shape[0]
        # Beam scores are summed negative log-likelihoods (lower = better).
        scores = torch.zeros(batch_size, beam_width, device=device)
        sequences = torch.full((batch_size, beam_width, 1), start_symbol,
                               dtype=torch.long, device=device)

        for step in range(max_len - 1):
            curr_len = sequences.shape[-1]

            # Additive causal mask: -inf above the diagonal blocks attention
            # to future positions. Same for every beam, so build it once per
            # step (was rebuilt per beam before).
            # NOTE(review): the original built this mask but passed None to
            # model.decode; assuming decode expects an additive float mask —
            # confirm against the model's decode() implementation.
            tgt_mask = torch.triu(
                torch.ones((curr_len, curr_len), device=device) * float('-inf'),
                diagonal=1
            )

            # BUGFIX: at the first step every beam holds the identical <sos>
            # prefix; expanding all of them just duplicates candidates.
            active_beams = 1 if step == 0 else beam_width

            all_candidates = []
            all_scores = []

            for b in range(batch_size):
                candidates = []
                candidate_scores = []

                for beam in range(active_beams):
                    seq = sequences[b, beam].unsqueeze(0)  # [1, curr_len]

                    # BUGFIX: pass the causal mask (None was passed before).
                    out = model.decode(seq, memory[b:b + 1], tgt_mask, src_mask[b:b + 1])

                    # BUGFIX: normalize logits to log-probabilities so scores
                    # accumulated across steps are comparable; raw fc_out
                    # logits were used as if they were log-probs.
                    log_probs = torch.log_softmax(model.fc_out(out[:, -1]), dim=-1)  # [1, vocab]

                    topk_lp, topk_idx = torch.topk(log_probs, k=beam_width)

                    for j in range(beam_width):
                        new_score = scores[b, beam] - topk_lp[0, j].item()
                        new_seq = torch.cat([seq, topk_idx[0, j].view(1, 1)], dim=1)
                        candidates.append(new_seq)
                        candidate_scores.append(new_score)

                # Keep the beam_width lowest-scoring (most likely) candidates.
                candidate_scores = torch.tensor(candidate_scores, device=device)
                top_indices = candidate_scores.argsort()[:beam_width]

                all_candidates.append([candidates[idx] for idx in top_indices])
                all_scores.append([candidate_scores[idx].item() for idx in top_indices])

            # BUGFIX: squeeze each [1, L] candidate to [L] before stacking;
            # the original produced [batch, beam, 1, L], which broke the
            # torch.cat above on the following step.
            sequences = torch.stack([
                torch.stack([cand.squeeze(0) for cand in batch_cands])
                for batch_cands in all_candidates
            ])  # [batch, beam, L+1]
            scores = torch.tensor(all_scores, device=device)

            # Stop early once every kept hypothesis already contains <eos>.
            if all((end_symbol in seq) for seq in sequences.view(-1, sequences.shape[-1])):
                break

    # Candidates were sorted ascending by score, so index 0 is the best beam.
    best_sequences = sequences[:, 0]  # [batch_size, seq_len]
    return best_sequences

def inference(config):
    """Evaluate the trained Transformer on the test set.

    Loads the checkpoint at config.save_model_path, runs teacher-forced
    decoding over the test loader, computes the corpus-average BLEU-4 and
    writes every (source, translation, reference) triple to
    config.translations_file.

    Args:
        config: configuration object providing device, model hyperparameters,
            save_model_path and translations_file.

    Returns:
        float: average BLEU-4 over the whole test dataset (samples with an
        empty prediction or reference contribute 0).
    """
    device = torch.device(config.device if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")

    # Data loaders (only the test split is needed here).
    _, _, test_loader = get_data_loaders(config)
    src_vocab = test_loader.dataset.src_vocab
    tgt_vocab = test_loader.dataset.tgt_vocab

    # Build the model with vocab sizes taken from the test dataset.
    model = Transformer(
        src_vocab_size=len(src_vocab),
        tgt_vocab_size=len(tgt_vocab),
        embedding_dim=config.embedding_dim,
        hidden_dim=config.hidden_dim,
        num_heads=config.num_heads,
        num_layers=config.num_layers,
        dropout=config.dropout,
        max_seq_length=config.max_seq_length
    ).to(device)

    # Load the trained weights; map_location=device covers both the CUDA and
    # the CPU-only case in a single call (replaces the explicit if/else).
    state_dict = torch.load(config.save_model_path, map_location=device)

    # Checkpoints saved from a DataParallel model prefix every key with
    # 'module.' — strip it so the keys match the bare model.
    if list(state_dict.keys())[0].startswith('module.'):
        state_dict = {k[7:]: v for k, v in state_dict.items()}

    model.load_state_dict(state_dict)
    print(f"模型已从 {config.save_model_path} 加载")

    model.eval()
    total_bleu4 = 0.0
    translations = []

    specials = {'<pad>', '<sos>', '<eos>'}

    def _to_tokens(ids, vocab):
        # Map ids back to token strings, dropping out-of-range ids and
        # special tokens (same filter the original applied three times).
        return [vocab.itos[idx] for idx in ids
                if idx < len(vocab.itos) and vocab.itos[idx] not in specials]

    with torch.no_grad():
        for batch in tqdm(test_loader, desc="正在翻译"):
            src = batch['src'].to(device)
            tgt = batch['tgt'].to(device)
            src_mask = batch['src_mask'].to(device)
            tgt_mask = batch['tgt_mask'].to(device)

            # NOTE(review): this is teacher-forced decoding — the gold target
            # (shifted right) is fed to the decoder, so the reported BLEU is
            # optimistic compared to true autoregressive decoding
            # (cf. beam_search_decode, which is never called here).
            output = model(src, tgt[:, :-1], src_mask, tgt_mask[:, :-1])

            # Greedy token ids per position.
            pred_tokens = torch.argmax(output, dim=-1).cpu().numpy()
            tgt_tokens = tgt[:, 1:].cpu().numpy()  # drop <sos>

            for i in range(len(pred_tokens)):
                # Source (Chinese), prediction and reference (English).
                src_seq = _to_tokens(src[i].cpu().numpy(), src_vocab)
                pred_seq = _to_tokens(pred_tokens[i], tgt_vocab)
                tgt_seq = _to_tokens(tgt_tokens[i], tgt_vocab)

                # BUGFIX: bleu4 was undefined in the preview below whenever
                # the first sample had an empty prediction or reference
                # (NameError). Default it to 0.0 per sample.
                bleu4 = 0.0
                if pred_seq and tgt_seq:
                    bleu4 = calculate_bleu4(tgt_seq, pred_seq)
                    total_bleu4 += bleu4

                translations.append({
                    'source': ''.join(src_seq),  # 中文不需要空格
                    'translation': ' '.join(pred_seq),
                    'reference': ' '.join(tgt_seq)
                })

                # Preview the very first translated sample.
                if len(translations) == 1:
                    preview_text = f"\n翻译预览:\n"
                    preview_text += f"源文本: {translations[0]['source']}\n"
                    preview_text += f"翻译: {translations[0]['translation']}\n"
                    preview_text += f"参考: {translations[0]['reference']}\n"
                    preview_text += f"BLEU-4: {bleu4:.4f}\n"
                    print(preview_text)

    # Average over the whole dataset; skipped (empty) samples count as 0.
    avg_bleu4 = total_bleu4 / len(test_loader.dataset)
    print(f'平均 BLEU-4 分数: {avg_bleu4:.4f}')

    output_file = config.translations_file
    # Persist all translations in a human-readable report.
    with open(output_file, 'w', encoding='utf-8') as f:
        for i, trans in enumerate(translations):
            f.write(f"样例 {i+1}:\n")
            f.write(f"源文本: {trans['source']}\n")
            f.write(f"翻译: {trans['translation']}\n")
            f.write(f"参考: {trans['reference']}\n")
            f.write("-" * 50 + "\n")

    print(f"翻译结果已保存到 {output_file}")

    return avg_bleu4