import torch
import torch.nn as nn
from torch.nn.functional import dropout
from tqdm import tqdm
import math
import numpy as np
import sentencepiece as spm
from feature_extract import ResNetFeatureExtractor
from models import Encoder, Decoder, Transformer
from dataloader import Emilia, DynamicBatchSampler, collate_fn
from torch.utils.data import DataLoader
from torch_optimizer import Lamb
from pathlib import Path
import os
from torch.optim.adamw import AdamW
import torchaudio
import torch.nn.functional as F
import argparse

os.environ["CUDA_LAUNCH_BLOCKING"] = "1"

# Model construction
def init_model(vocab_size, enc_dim, num_enc_layers, num_dec_layers, dropout):
    """
    Build the speech-recognition Transformer (feature extractor + encoder + decoder).

    Args:
        vocab_size: tokenizer vocabulary size (decoder output dimension).
        enc_dim: model dimension shared by the feature extractor, encoder and decoder.
        num_enc_layers: number of encoder layers.
        num_dec_layers: number of decoder layers.
        dropout: dropout probability applied to embeddings, feed-forward and attention.

    Returns:
        A Transformer model combining the ResNet feature extractor, encoder and decoder.
    """
    fbank_dim = 80  # mel filterbank feature dimension
    num_heads = enc_dim // 64  # one attention head per 64 dims
    max_seq_len = 30  # maximum input length in seconds
    # Convert to fbank frame count (assumes 100 frames/s, i.e. 10 ms hop — TODO confirm)
    max_seq_len = max_seq_len * 100

    # Acoustic feature extraction / projection to enc_dim
    feature_extractor = ResNetFeatureExtractor(in_dim=fbank_dim, out_dim=enc_dim)

    # Encoder configuration
    encoder = Encoder(
        dropout_emb=dropout,  # embedding dropout
        dropout_posffn=dropout,  # position-wise FFN dropout
        dropout_attn=dropout,  # attention dropout
        num_layers=num_enc_layers,  # number of encoder layers
        enc_dim=enc_dim,  # encoder dimension
        num_heads=num_heads,  # attention heads
        dff=2048,  # FFN hidden dimension
        tgt_len=max_seq_len  # maximum sequence length
    )

    # Decoder configuration
    decoder = Decoder(
        dropout_emb=dropout,
        dropout_posffn=dropout,
        dropout_attn=dropout,
        num_layers=num_dec_layers,  # number of decoder layers
        dec_dim=enc_dim,  # decoder dim matches encoder dim
        num_heads=num_heads,
        dff=2048,
        tgt_len=max_seq_len,
        tgt_vocab_size=vocab_size  # vocabulary size
    )

    # Assemble the full Transformer model
    model = Transformer(
        feature_extractor,  # acoustic feature processing
        encoder,  # acoustic feature encoding
        decoder,  # text generation
        enc_dim,  # dimension consistency check
        vocab_size  # output vocabulary size
    )
    return model

def save_checkpoint(model, optimizer, epoch, loss, path):
    """Persist training state (epoch, model/optimizer weights, loss) to *path*."""
    state = {
        'epoch': epoch,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'loss': loss,
    }
    torch.save(state, path)

def load_checkpoint(model, optimizer, checkpoint_path):
    """
    Restore training state from a checkpoint file.

    Args:
        model: model whose weights are loaded in place.
        optimizer: optimizer whose state is loaded in place.
        checkpoint_path: path to a file written by save_checkpoint.

    Returns:
        (start_epoch, best_val_loss): the epoch to resume from and the
        recorded loss; (0, float('inf')) when the file does not exist.
    """
    if not os.path.exists(checkpoint_path):
        print(f"检查点文件不存在: {checkpoint_path}")
        return 0, float('inf')

    print(f"从检查点恢复训练: {checkpoint_path}")
    checkpoint = torch.load(checkpoint_path, map_location='cpu')

    model.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    start_epoch = checkpoint['epoch'] + 1
    # .get so checkpoints without a recorded loss still load.
    best_val_loss = checkpoint.get('loss', float('inf'))

    # BUG FIX: previously formatted checkpoint['loss'] directly, which raised
    # KeyError for checkpoints missing 'loss' despite the .get() fallback above.
    print(f"恢复训练状态: epoch={checkpoint['epoch']}, loss={best_val_loss:.4f}")
    return start_epoch, best_val_loss


if __name__ == '__main__':
    # Parse command-line arguments
    parser = argparse.ArgumentParser(description='语音识别模型训练')
    parser.add_argument('--resume', type=str, default='', help='检查点路径，用于恢复训练')
    args = parser.parse_args()
    # Load the SentencePiece tokenizer
    model_file = "./tokenizer/chinese.model"
    chinese_sp = spm.SentencePieceProcessor(model_file=model_file)

    # Build the model
    vocab_size = chinese_sp.vocab_size()
    print("vocab_size from tokenizer:", chinese_sp.vocab_size())
    enc_dim = 256  # encoder input dimension (feature-extractor output dim)
    num_enc_layers = 12 # encoder depth
    num_dec_layers = 6 # decoder depth
    dropout = 0.1
    model = init_model(vocab_size, enc_dim, num_enc_layers, num_dec_layers, dropout)

    # Data loading
    data_dir = Path("./data/Emilia-YODAS/ZH")
    sample_rate = 16000
    batch_time = 24  # seconds of audio per batch
    # Dataset split
    train_ratio=0.8 # training-set fraction
    val_ratio=0.1 # validation-set fraction
    test_ratio=0.1 # test-set fraction
    seed=42 # random seed for a reproducible split

    train_files, val_files, test_files = Emilia.split_dataset(data_dir, train_ratio=train_ratio, val_ratio=val_ratio, test_ratio=test_ratio, seed=seed)
    train_set = Emilia(data_dir=data_dir, sample_rate=sample_rate, files=train_files)
    val_set = Emilia(data_dir=data_dir, sample_rate=sample_rate, files=val_files)
    test_set = Emilia(data_dir=data_dir, sample_rate=sample_rate, files=test_files)
    # DynamicBatchSampler caps each batch at batch_time * sample_rate audio samples
    train_loader = DataLoader(
        train_set,
        batch_sampler=DynamicBatchSampler(train_set, max_frames=batch_time * sample_rate, shuffle=True),
        num_workers=4,
        collate_fn=collate_fn,
    )
    val_loader = DataLoader(
        val_set,
        batch_sampler=DynamicBatchSampler(val_set, max_frames=batch_time * sample_rate, shuffle=False),
        num_workers=2,
        collate_fn=collate_fn,
    )
    # NOTE(review): test_loader is built but never consumed in this script.
    test_loader = DataLoader(
        test_set,
        batch_sampler=DynamicBatchSampler(test_set, max_frames=batch_time * sample_rate, shuffle=False),
        num_workers=2,
        collate_fn=collate_fn,
    )


    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)

    # Loss configuration
    criterion = nn.CrossEntropyLoss(
        ignore_index=0,  # pad_id=0: padding positions do not contribute to the loss
        label_smoothing=0.1  # label smoothing
    )

    # Optimizer configuration
    max_lr = 5e-2  # peak learning rate for OneCycleLR
    num_epochs = 50  # total training epochs
    num_warmup = 10000
    steps_per_epoch = len(train_loader)
    # Warm-up fraction of the total step budget, fed to OneCycleLR pct_start
    pcb = num_warmup / (steps_per_epoch * num_epochs)
    div_factor = 25
    optimizer = Lamb(
        model.parameters(),
        lr=max_lr / div_factor,  # initial LR; OneCycleLR ramps up to max_lr
        betas=(0.9, 0.98),
        weight_decay=0.03,
        eps=1e-5
    )
    scheduler = torch.optim.lr_scheduler.OneCycleLR(
        optimizer,
        max_lr=max_lr,
        steps_per_epoch=steps_per_epoch,
        epochs=num_epochs,
        pct_start=pcb,
        div_factor=div_factor,
        final_div_factor=1e4,
        anneal_strategy='linear'
    )

    # Optionally resume from a checkpoint.
    # NOTE(review): scheduler state is neither saved nor restored, so resuming
    # restarts OneCycleLR from step 0 — confirm this is intended.
    start_epoch = 1
    best_val_loss = float('inf')  # initialize best_val_loss
    if args.resume:
        start_epoch, best_val_loss = load_checkpoint(model, optimizer, args.resume)
        print(f"从epoch {start_epoch}开始恢复训练")

    # Create checkpoint directory
    save_dir = "./ckpts"
    os.makedirs(save_dir, exist_ok=True)

    for epoch in range(start_epoch, num_epochs + 1):
        model.train()
        train_loss = 0
        # Fresh progress bar per epoch
        pbar = tqdm(train_loader, desc=f"Epoch {epoch} [Train]", unit="batch", bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]")
        for i, batch in enumerate(pbar, start=1):
            audio = batch["audio"].to(device)
            lengths = batch["lengths"].to(device)
            # Special-token ids (must match tokenizer training).
            # NOTE(review): these are assigned inside the train loop but reused by
            # the validation loop below — relies on at least one training batch.
            pad_id = 0  # pad id fixed at 0
            unk_id = 1  # unk id fixed at 1
            bos_id = 2  # bos id fixed at 2
            eos_id = 3  # eos id fixed at 3

            # Encode text and wrap with BOS/EOS tokens
            text = []
            for s in batch["text"]:
                tokens = chinese_sp.encode_as_ids(s)
                # Defensive: map any negative id to <unk> (SentencePiece ids are
                # normally non-negative, so this is expected to be a no-op)
                tokens = [t if t >= 0 else unk_id for t in tokens]
                # Add BOS and EOS tokens
                tokens = [bos_id] + tokens + [eos_id]
                text.append(tokens)

            # Right-pad token sequences to the batch max length with pad_id
            max_len = max(len(t) for t in text)
            text_tensor = torch.full((len(text), max_len), pad_id, dtype=torch.long)
            for j, t in enumerate(text):
                text_tensor[j, :len(t)] = torch.tensor(t)
            text_tensor = text_tensor.to(device)

            optimizer.zero_grad()
            # Teacher forcing: decoder input drops the last token, target drops BOS
            logits = model(audio, lengths, text_tensor[:, :-1])
            loss = criterion(logits.reshape(-1, logits.size(-1)), text_tensor[:, 1:].reshape(-1))
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 5.0)
            optimizer.step()
            scheduler.step()  # OneCycleLR steps per batch, not per epoch
            train_loss += loss.item()
            pbar.set_postfix({
                "loss": f"{train_loss / i:.4f}",
                "lr": f"{optimizer.param_groups[0]['lr']:.6f}"
            })
        train_loss /= len(train_loader)

        # Validation
        model.eval()
        val_loss = 0
        pbar_val = tqdm(val_loader, desc=f"Epoch {epoch} [Val]", unit="batch", bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]")
        with torch.no_grad():
            for i, batch in enumerate(pbar_val, start=1):
                audio = batch["audio"].to(device)
                lengths = batch["lengths"].to(device)
                # Encode text with BOS/EOS, same as the training loop
                text = []
                for s in batch["text"]:
                    tokens = chinese_sp.encode_as_ids(s)
                    tokens = [t if t >= 0 else unk_id for t in tokens]
                    # Add BOS and EOS tokens
                    tokens = [bos_id] + tokens + [eos_id]
                    text.append(tokens)

                max_len = max(len(t) for t in text)
                text_tensor = torch.full((len(text), max_len), pad_id, dtype=torch.long)
                for j, t in enumerate(text):
                    text_tensor[j, :len(t)] = torch.tensor(t)
                text_tensor = text_tensor.to(device)

                logits = model(audio, lengths, text_tensor[:, :-1])
                loss = criterion(logits.reshape(-1, logits.size(-1)), text_tensor[:, 1:].reshape(-1))
                val_loss += loss.item()
                pbar_val.set_postfix({
                    "loss": f"{val_loss / i:.4f}"
                })
            val_loss /= len(val_loader)

        print(f"Epoch {epoch}: train_loss={train_loss:.4f}, val_loss={val_loss:.4f}")

        # Save the best model so far
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            save_checkpoint(model, optimizer, epoch, val_loss, os.path.join(save_dir, "best_model.pt"))
            print("Best model saved.")

        # Periodic checkpoint (every epoch)
        if epoch % 1 == 0:
            save_checkpoint(model, optimizer, epoch, val_loss, os.path.join(save_dir, f"checkpoint_epoch{epoch}.pt"))

        # Resume training from a checkpoint:
        # python train.py --resume ./ckpts/checkpoint_epoch12.pt
        # python train.py --resume ./ckpts/best_model.pt