import json
import os
import random
from datetime import datetime
from functools import partial
from os import path
from typing import Tuple, List

import sacrebleu
import torch
from datasets import load_from_disk
from tokenizers import Tokenizer
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm

from src.model import Transformer_Seq2Seq


class MTDataset(Dataset):
    """Machine-translation dataset backed by a pre-tokenized HF dataset on disk.

    Each item is a (source ids, target ids) pair of 1-D integer tensors,
    ready for padding in ``collate_fn``.
    """

    def __init__(
        self,
        dataset_path: str,  # path to the pre-tokenized dataset saved to disk
        tokenizer_path: str,  # tokenizer file, needed only to look up the pad id
    ):
        print(f"正在从磁盘加载预处理数据集: {dataset_path}")
        # load_from_disk memory-maps the Arrow files, so this is near-instant.
        self.dataset = load_from_disk(dataset_path)
        print("加载完毕。")

        # The pad id is required by collate_fn for batching.
        tokenizer = Tokenizer.from_file(tokenizer_path)
        self.pad_id = tokenizer.token_to_id("[PAD]")
        if self.pad_id is None:
            raise ValueError("[PAD] token not found in tokenizer")

    def __len__(self) -> int:
        return len(self.dataset)

    def __getitem__(self, idx) -> Tuple[torch.Tensor, torch.Tensor]:
        item = self.dataset[idx]
        # load_from_disk does not persist a torch output format, so the stored
        # ids come back as plain Python lists; convert them so pad_sequence in
        # collate_fn (which requires tensors) works. torch.as_tensor is a
        # no-op if the values are already tensors.
        return torch.as_tensor(item["src_ids"]), torch.as_tensor(item["tgt_ids"])


def collate_fn(batch, pad_id):
    """Pad a list of (src, tgt) tensor pairs into a batch dict with masks.

    Returns a dict with padded ``src_ids`` / ``tgt_ids`` and boolean
    key-padding masks that are True at padded positions.
    """
    sources, targets = zip(*batch)
    padded_src = pad_sequence(sources, batch_first=True, padding_value=pad_id)
    padded_tgt = pad_sequence(targets, batch_first=True, padding_value=pad_id)
    return {
        "src_ids": padded_src,
        "tgt_ids": padded_tgt,
        "src_key_padding_mask": padded_src.eq(pad_id),
        "tgt_key_padding_mask": padded_tgt.eq(pad_id),
    }


def decode_tokens(tokenizer, token_ids: torch.Tensor, pad_id: int, bos_id: int, eos_id: int) -> str:
    """Decode a 1-D tensor of token ids to text, dropping PAD/BOS/EOS first.

    Returns an empty string when nothing but special tokens remains (the
    tokenizer is not touched in that case).
    """
    special_ids = {pad_id, bos_id, eos_id}
    kept = [int(t) for t in token_ids if int(t) not in special_ids]
    return tokenizer.decode(kept) if kept else ""


def calculate_bleu_score(predictions: List[str], references: List[str]) -> float:
    """Compute corpus-level BLEU (0-100) with sacrebleu.

    Returns 0.0 when either list is empty. The two lists are truncated to a
    common length so they align pairwise.
    """
    if not predictions or not references:
        return 0.0
    
    # Align predictions and references pairwise.
    min_len = min(len(predictions), len(references))
    predictions = predictions[:min_len]
    references = references[:min_len]
    
    # sacrebleu expects a list of reference *streams*, where each stream holds
    # one reference per hypothesis. With a single reference per sentence the
    # whole list is wrapped once ([references]) -- NOT once per sentence
    # ([[r] for r in references]), which would present N streams of length 1
    # and make sacrebleu raise a stream-length mismatch whenever N > 1.
    bleu = sacrebleu.corpus_bleu(predictions, [references])
    return bleu.score

@torch.no_grad()
def evaluate_on_val(
    model, 
    val_dataloader, 
    loss_fn, 
    tokenizer,
    train_dataset,
    step: int,
    output_dir: str = "results",
    device: torch.device = torch.device("cpu"),
    num_samples: int = 10
) -> Tuple[float, float]:
    """
    Single-pass validation evaluation that does all of the following:
    1. computes the teacher-forced validation loss,
    2. computes corpus BLEU from beam-search generations of the same batches,
    3. samples a few example translations (drawn from the *training* set),
    4. appends loss/BLEU/samples to a text report under ``output_dir``.

    Returns:
        Tuple[float, float]: (validation loss, BLEU score)
    """
    model.eval()
    total_loss = 0
    batch_count = 0
    
    # Accumulators for corpus-level BLEU.
    all_predictions = []
    all_references = []
    
    bos_id = tokenizer.token_to_id("[BOS]")
    eos_id = tokenizer.token_to_id("[EOS]")
    pad_id = tokenizer.token_to_id("[PAD]")
    
    print("正在计算验证集loss和BLEU分数...")
    pbar = tqdm(val_dataloader, desc="评估验证集")
    
    # NOTE(review): redundant with the @torch.no_grad() decorator above, but harmless.
    with torch.no_grad():
        for batch in pbar:
            src_ids = batch["src_ids"].to(device)
            tgt_ids = batch["tgt_ids"].to(device)
            src_key_padding_mask = batch["src_key_padding_mask"].to(device)
            tgt_key_padding_mask = batch["tgt_key_padding_mask"].to(device)
            
            # Teacher-forced loss: feed tgt[:, :-1], score against tgt[:, 1:].
            decoder_input_ids = tgt_ids[:, :-1]
            labels = tgt_ids[:, 1:].contiguous()
            tgt_key_padding_mask_for_model = tgt_key_padding_mask[:, :-1]

            logits = model(
                src_ids=src_ids,
                tgt_ids=decoder_input_ids,
                src_key_padding_mask=src_key_padding_mask,
                tgt_key_padding_mask=tgt_key_padding_mask_for_model
            )
            loss = loss_fn(
                logits.view(-1, logits.size(-1)), labels.view(-1)
            )
            total_loss += loss.item()
            batch_count += 1
            
            # Generate translations for BLEU in the same pass, a whole batch
            # at a time rather than one sentence per call.
            generated_ids = model.generate(
                src_ids=src_ids,
                max_length=128,
                bos_token_id=bos_id,
                eos_token_id=eos_id,
                pad_token_id=pad_id,
                beam_size=4,  # beam search for a more reliable BLEU estimate
                src_key_padding_mask=src_key_padding_mask,
                min_new_tokens=5,
                length_penalty_alpha=0.6
            )
            
            # Decode the whole batch into reference/prediction text pairs.
            for j in range(src_ids.size(0)):
                tgt_text = decode_tokens(tokenizer, tgt_ids[j], pad_id, bos_id, eos_id)
                pred_text = decode_tokens(tokenizer, generated_ids[j], pad_id, bos_id, eos_id)
                
                all_references.append(tgt_text)
                all_predictions.append(pred_text)
            
            pbar.set_postfix(loss=loss.item())
    
    avg_loss = total_loss / batch_count if batch_count > 0 else 0.0
    bleu_score = calculate_bleu_score(all_predictions, all_references)
    
    # Sample translations for qualitative inspection (from the training set).
    print(f"正在采样 {num_samples} 个翻译样本...")
    samples = []
    indices = random.sample(range(len(train_dataset)), min(num_samples, len(train_dataset)))
    
    with torch.no_grad():
        for idx in indices:
            src_ids, tgt_ids = train_dataset[idx]
            
            # Add a batch dimension.
            src_ids = src_ids.unsqueeze(0).to(device)
            tgt_ids = tgt_ids.unsqueeze(0).to(device)
            
            # Build the source key-padding mask (True at padded positions).
            src_key_padding_mask = src_ids == pad_id
            
            # Generate a translation for this single sample.
            generated_ids = model.generate(
                src_ids=src_ids,
                max_length=128,
                bos_token_id=bos_id,
                eos_token_id=eos_id,
                pad_token_id=pad_id,
                beam_size=4,
                src_key_padding_mask=src_key_padding_mask
            )
            
            # Decode source / reference / prediction for the report.
            src_text = decode_tokens(tokenizer, src_ids[0], pad_id, bos_id, eos_id)
            tgt_text = decode_tokens(tokenizer, tgt_ids[0], pad_id, bos_id, eos_id)
            pred_text = decode_tokens(tokenizer, generated_ids[0], pad_id, bos_id, eos_id)
            
            samples.append({
                "index": idx,
                "source": src_text,
                "target": tgt_text,
                "prediction": pred_text
            })
    
    # Append the evaluation results to the cumulative report file.
    os.makedirs(output_dir, exist_ok=True)
    filename = "evaluation_results.txt"
    filepath = os.path.join(output_dir, filename)
    
    with open(filepath, "a", encoding="utf-8") as f:
        f.write(f"=== 结果 - Step {step} ===\n")
        f.write(f"时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
        f.write(f"验证集Loss: {avg_loss:.4f}\n")
        f.write(f"BLEU分数: {bleu_score:.2f}\n\n")
        
        f.write("=== 样本 ===\n")
        for i, sample in enumerate(samples, 1):
            f.write(f"\n样本 {i}:\n")
            f.write(f"源: {sample['source']}\n")
            f.write(f"目标: {sample['target']}\n")
            f.write(f"预测: {sample['prediction']}\n")
            f.write("-" * 50 + "\n")
        f.write("\n" + "=" * 80 + "\n\n")
    
    print(f"评估结果已追加保存到: {filepath}")
    print(f"验证集Loss: {avg_loss:.4f}")
    print(f"BLEU分数: {bleu_score:.2f}")
    
    return avg_loss, bleu_score


def train_one_epoch(
    model,
    optimizer,
    scheduler,
    train_dataloader,
    val_dataloader,
    loss_fn,
    tokenizer,
    train_dataset,
    device,
    eval_steps: int = 100,
    current_epoch: int = 0,
    output_dir: str = "results",
    max_grad_norm: float = 1.0
):
    """Train the model for one epoch with NaN guards, gradient clipping, a
    scheduler step per batch, and a full evaluation every ``eval_steps`` steps.

    Returns the mean training loss over the epoch.

    NOTE(review): batches skipped by the NaN guards still count in the final
    denominator (len(train_dataloader)), so the returned mean is slightly low
    whenever skips occur -- confirm this is intended.
    """
    model.train()
    total_loss = 0
    # Global step offset; assumes every epoch sees the same number of batches.
    global_step = current_epoch * len(train_dataloader)
    pbar = tqdm(train_dataloader, desc=f"Training Epoch {current_epoch + 1}")

    for batch_idx, batch in enumerate(pbar):
        optimizer.zero_grad()  # explicitly clear gradients

        src_ids = batch["src_ids"].to(device)
        tgt_ids = batch["tgt_ids"].to(device)
        src_key_padding_mask = batch["src_key_padding_mask"].to(device)
        tgt_key_padding_mask = batch["tgt_key_padding_mask"].to(device)
        
        # Teacher forcing: feed tgt[:, :-1], score against tgt[:, 1:]
        # (same loss scheme as the validation passes).
        decoder_input_ids = tgt_ids[:, :-1]
        labels = tgt_ids[:, 1:].contiguous()
        tgt_key_padding_mask_for_model = tgt_key_padding_mask[:, :-1]

        logits = model(
            src_ids=src_ids,
            tgt_ids=decoder_input_ids,
            src_key_padding_mask=src_key_padding_mask,
            tgt_key_padding_mask=tgt_key_padding_mask_for_model
        )

        # Guard: skip the batch if the forward pass produced NaNs.
        if torch.isnan(logits).any():
            print(f"Warning: NaN detected in logits at step {global_step + batch_idx + 1}")
            continue

        loss = loss_fn(
            logits.view(-1, logits.size(-1)), labels.view(-1)
        )

        # Guard: skip the batch on NaN/Inf loss as well.
        if torch.isnan(loss) or torch.isinf(loss):
            print(f"Warning: NaN/Inf loss detected at step {global_step + batch_idx + 1}, skipping batch")
            continue

        loss.backward()

        # Clip gradients to stabilize training.
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)

        # Guard: skip the optimizer step if any gradient contains NaN.
        has_nan_grad = False
        for param in model.parameters():
            if param.grad is not None and torch.isnan(param.grad).any():
                has_nan_grad = True
                break

        if has_nan_grad:
            print(f"Warning: NaN gradients detected at step {global_step + batch_idx + 1}, skipping update")
            optimizer.zero_grad()
            continue

        optimizer.step()
        scheduler.step()
        total_loss += loss.item()
        pbar.set_postfix(loss=loss.item())

        # Run a full evaluation every eval_steps global steps.
        if (global_step + batch_idx + 1) % eval_steps == 0:
            print(f"\n=== 在 Step {global_step + batch_idx + 1} 进行评估 ===")

            # Combined loss/BLEU/sampling evaluation.
            # NOTE(review): the return values are unused here; evaluate_on_val
            # also writes them to the report file.
            val_loss, bleu_score = evaluate_on_val(
                model, val_dataloader, loss_fn, tokenizer, train_dataset,
                global_step + batch_idx + 1, output_dir, device
            )

            print("=" * 60)
            model.train()  # back to training mode after evaluation

    return total_loss / len(train_dataloader)

def train(
    model,
    optimizer,
    scheduler,
    train_dataloader,
    val_dataloader,
    loss_fn,
    tokenizer,
    train_dataset,
    device,
    epochs,
    eval_steps: int = 100,
    output_dir: str = "results",
    max_grad_norm: float = 1.0
):
    """Full training loop: each epoch trains once over the training set and
    then computes the teacher-forced loss over the whole validation set."""
    for epoch in range(epochs):
        train_loss = train_one_epoch(
            model, optimizer, scheduler, train_dataloader, val_dataloader, loss_fn,
            tokenizer, train_dataset, device, eval_steps,
            epoch, output_dir, max_grad_norm
        )

        # End-of-epoch validation loss over the full validation set.
        model.eval()
        running_loss = 0.0
        with torch.no_grad():
            for batch in val_dataloader:
                src = batch["src_ids"].to(device)
                tgt = batch["tgt_ids"].to(device)
                src_pad = batch["src_key_padding_mask"].to(device)
                tgt_pad = batch["tgt_key_padding_mask"].to(device)

                # Teacher forcing: feed tgt[:, :-1], score against tgt[:, 1:]
                # (same scheme used during training).
                logits = model(
                    src_ids=src,
                    tgt_ids=tgt[:, :-1],
                    src_key_padding_mask=src_pad,
                    tgt_key_padding_mask=tgt_pad[:, :-1]
                )
                gold = tgt[:, 1:].contiguous()
                batch_loss = loss_fn(logits.view(-1, logits.size(-1)), gold.view(-1))
                running_loss += batch_loss.item()
        val_loss = running_loss / len(val_dataloader)
        print(f"Epoch {epoch+1}, Train Loss: {train_loss}, Val Loss: {val_loss}")

def load_config(config_path: str = "configs/eval_config.json") -> dict:
    """Load the evaluation config file, falling back to built-in defaults.

    The file's values are overlaid on the defaults, so a *partial* config file
    no longer causes a later KeyError in main() for keys it omits.

    Args:
        config_path: path to a JSON config file; if it does not exist, the
            defaults are returned unchanged.

    Returns:
        dict with at least the keys: eval_steps, batch_size, epochs,
        max_length, beam_size, output_dir.
    """
    defaults = {
        "eval_steps": 5000,
        "batch_size": 32,
        "epochs": 3,
        "max_length": 128,
        "beam_size": 4,
        "output_dir": "results"
    }
    if os.path.exists(config_path):
        with open(config_path, 'r', encoding='utf-8') as f:
            user_config = json.load(f)
        # File values win; defaults fill in anything missing.
        return {**defaults, **user_config}
    return defaults

def main():
    """Entry point: load config, data, and model, then run the training loop."""
    # Configuration (file values overlaid on built-in defaults).
    config = load_config()
    EVAL_STEPS = config["eval_steps"]
    BATCH_SIZE = config["batch_size"]
    EPOCHS = config["epochs"]
    MAX_LENGTH = config["max_length"]
    # NOTE(review): BEAM_SIZE is read but never used below -- evaluate_on_val
    # hard-codes beam_size=4; confirm which should win.
    BEAM_SIZE = config["beam_size"]
    OUTPUT_DIR = config["output_dir"]
    
    # Device selection.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"使用设备: {device}")
    print(f"配置参数: {config}")
    
    # Pre-tokenized datasets loaded from disk.
    train_dataset = MTDataset(
        dataset_path="data/processed/tokenized/train",
        tokenizer_path="data/processed/tokenizer.json",
    )
    val_dataset = MTDataset(
        dataset_path="data/processed/tokenized/val",
        tokenizer_path="data/processed/tokenizer.json",
    )
    tokenizer = Tokenizer.from_file("data/processed/tokenizer.json")
    
    # DataLoaders. functools.partial instead of a lambda so the collate_fn is
    # picklable with num_workers > 0 under the spawn start method
    # (Windows/macOS default) -- lambdas cannot be pickled by worker processes.
    train_dataloader = DataLoader(
        train_dataset, 
        batch_size=BATCH_SIZE, 
        num_workers=4, 
        pin_memory=True, 
        shuffle=True, 
        collate_fn=partial(collate_fn, pad_id=train_dataset.pad_id)
    )
    val_dataloader = DataLoader(
        val_dataset, 
        batch_size=BATCH_SIZE, 
        num_workers=4, 
        pin_memory=True, 
        shuffle=False, 
        collate_fn=partial(collate_fn, pad_id=val_dataset.pad_id)
    )
    
    # Model (shared source/target embeddings over one joint vocabulary).
    model = Transformer_Seq2Seq(
        d_model=512,
        num_heads=8,
        d_ff=1024,
        num_layers=3,
        max_seq_len=MAX_LENGTH,
        rope_theta=10000.0,
        share_embeddings=True,
        src_vocab_size=tokenizer.get_vocab_size(),
        tgt_vocab_size=tokenizer.get_vocab_size(),
    ).to(device)
    
    # Optimizer, one-cycle LR schedule with warmup, and label-smoothed loss
    # that ignores padding positions.
    optimizer = torch.optim.AdamW(model.parameters(), lr=5e-4, weight_decay=1e-5)
    warmup_steps = 1000
    total_steps = len(train_dataloader) * EPOCHS
    scheduler = torch.optim.lr_scheduler.OneCycleLR(
        optimizer, 
        max_lr=5e-4,
        total_steps=total_steps,
        pct_start=warmup_steps/total_steps,
        anneal_strategy='cos'
    )
    loss_fn = torch.nn.CrossEntropyLoss(
        ignore_index=tokenizer.token_to_id("[PAD]"),
        label_smoothing=0.05
    )
    
    print(f"开始训练，每 {EVAL_STEPS} 步评估一次")
    print(f"结果将保存到: {OUTPUT_DIR}")
    
    # Run training.
    train(
        model, optimizer, scheduler, train_dataloader, val_dataloader, loss_fn,
        tokenizer, train_dataset, device, EPOCHS, EVAL_STEPS, OUTPUT_DIR, max_grad_norm=1.0
    )

# Script entry point: run training only when executed directly.
if __name__ == "__main__":
    main()
