#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
WeNet模型训练脚本
用于使用AIshell数据集训练WeNet语音识别模型
"""

import os
import argparse
import torch
import torchaudio
import numpy as np
from torch.utils.data import Dataset, DataLoader
from torch.optim import Adam
from torch.optim.lr_scheduler import ReduceLROnPlateau
import matplotlib.pyplot as plt
from tqdm import tqdm

# Command-line configuration.
# NOTE: arguments are parsed at import time, so this module is meant to be
# executed as a standalone script rather than imported as a library.
parser = argparse.ArgumentParser(description='WeNet模型训练脚本')
parser.add_argument('--data_dir', type=str, default='./data_aishell', help='AIshell数据集路径')
parser.add_argument('--output_dir', type=str, default='./output', help='模型输出路径')
parser.add_argument('--batch_size', type=int, default=16, help='批大小')
parser.add_argument('--epochs', type=int, default=30, help='训练轮次')
parser.add_argument('--lr', type=float, default=0.001, help='学习率')
parser.add_argument('--device', type=str, default='cuda' if torch.cuda.is_available() else 'cpu', help='训练设备')
args = parser.parse_args()

# Create the output directory for checkpoints and training-curve plots.
os.makedirs(args.output_dir, exist_ok=True)

# Audio preprocessing helper
def preprocess_audio(wav_file, target_sr=16000):
    """Load a wav file, resample it to ``target_sr`` and down-mix to mono.

    Returns a tensor of shape [1, num_samples].
    """
    signal, orig_sr = torchaudio.load(wav_file)

    # Resample only when the on-disk rate differs from the requested rate.
    if orig_sr != target_sr:
        resampler = torchaudio.transforms.Resample(orig_sr, target_sr)
        signal = resampler(signal)

    # Average the channels together when the clip is not already mono.
    if signal.shape[0] > 1:
        signal = signal.mean(dim=0, keepdim=True)

    return signal

def extract_features(waveform, n_mels=80, n_fft=400, hop_length=160, sample_rate=16000):
    """Compute a per-utterance normalized log-mel spectrogram.

    Args:
        waveform: mono audio tensor of shape [1, num_samples].
        n_mels: number of mel filterbank channels.
        n_fft: FFT window size in samples.
        hop_length: hop between successive frames, in samples.
        sample_rate: sample rate the waveform is assumed to have.  Previously
            hard-coded to 16 kHz inside the function even though callers may
            resample to any rate; exposed as a backward-compatible parameter.

    Returns:
        Tensor of shape [1, n_mels, num_frames], normalized to zero mean and
        unit variance over the whole utterance.
    """
    mel_spectrogram = torchaudio.transforms.MelSpectrogram(
        sample_rate=sample_rate,
        n_fft=n_fft,
        hop_length=hop_length,
        n_mels=n_mels
    )(waveform)

    # Log compression; the epsilon avoids log(0) on silent frames.
    log_mel = torch.log(mel_spectrogram + 1e-9)

    # Utterance-level mean/variance normalization (one scalar mean and std
    # over all channels and frames, not per-channel); the epsilon guards
    # against division by a zero std on constant input.
    mean = torch.mean(log_mel)
    std = torch.std(log_mel)
    normalized_features = (log_mel - mean) / (std + 1e-9)

    return normalized_features

# Dataset class
class AIshellDataset(Dataset):
    """AIshell speech dataset loader.

    Reads a transcript file of lines ``<wav_id> <text...>`` and keeps only
    the entries whose wav file exists on disk at
    ``<data_dir>/wav/<split>/<wav_id[:6]>/<wav_id>.wav``.
    """

    def __init__(self, data_dir, split='train'):
        self.data_dir = data_dir
        self.split = split

        self.wav_dir = os.path.join(data_dir, 'wav', split)
        self.transcript_file = os.path.join(data_dir, 'transcript', f'{split}_transcripts.txt')

        # Parse transcripts; each retained sample is a (wav_path, text) pair.
        self.samples = []
        with open(self.transcript_file, 'r', encoding='utf-8') as f:
            for line in f:
                parts = line.strip().split()
                if len(parts) >= 2:
                    wav_id = parts[0]
                    text = ' '.join(parts[1:])
                    # AIshell groups wavs in per-speaker folders named by the
                    # first 6 characters of the utterance id.
                    wav_path = os.path.join(self.wav_dir, wav_id[:6], f'{wav_id}.wav')
                    if os.path.exists(wav_path):
                        self.samples.append((wav_path, text))

        # Build the character vocabulary ONCE here.  The original code
        # rebuilt it from scratch on every text_to_ids() call (O(dataset)
        # work per item) and iterated an unsorted set, so the id mapping was
        # not reproducible across runs.
        # NOTE(review): the vocabulary is still per-split, so the 'train'
        # and 'dev' datasets can assign different ids to the same character
        # — verify against how ids are decoded downstream.
        self._build_vocab()

    def _build_vocab(self):
        """Create deterministic char<->id maps from self.samples (0 = padding)."""
        chars = sorted({ch for _, text in self.samples for ch in text})
        self.char_to_id = {ch: i + 1 for i, ch in enumerate(chars)}
        self.id_to_char = {i + 1: ch for i, ch in enumerate(chars)}
        self.id_to_char[0] = ''  # padding id decodes to the empty string

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        wav_path, text = self.samples[idx]

        # Load and preprocess the audio, then extract mel features.
        waveform = preprocess_audio(wav_path)
        features = extract_features(waveform)

        # Encode the transcript as a sequence of character ids.
        text_ids = self.text_to_ids(text)

        return features, torch.tensor(text_ids)

    def text_to_ids(self, text):
        """Map text to id list using the vocabulary built in __init__.

        Unknown characters map to 0 (the padding id), matching the
        original behavior.
        """
        return [self.char_to_id.get(ch, 0) for ch in text]

# Simplified WeNet model
class SimpleWeNetModel(torch.nn.Module):
    """Simplified WeNet-style model.

    A three-stage 1-D convolutional encoder, a mean-pool over the time axis,
    and a two-layer MLP decoder producing one logit vector per utterance.
    """

    def __init__(self, input_dim=80, hidden_dim=512, output_dim=4000):
        super(SimpleWeNetModel, self).__init__()

        # Encoder: three identical Conv1d -> ReLU -> BatchNorm1d stages.
        # Built in a loop; module indices match a flat Sequential, so
        # state_dict keys are unchanged.
        stages = []
        in_channels = input_dim
        for _ in range(3):
            stages.extend([
                torch.nn.Conv1d(in_channels, hidden_dim, kernel_size=3, stride=1, padding=1),
                torch.nn.ReLU(),
                torch.nn.BatchNorm1d(hidden_dim),
            ])
            in_channels = hidden_dim
        self.encoder = torch.nn.Sequential(*stages)

        # Decoder: hidden -> hidden -> output MLP with dropout.
        self.decoder = torch.nn.Sequential(
            torch.nn.Linear(hidden_dim, hidden_dim),
            torch.nn.ReLU(),
            torch.nn.Dropout(0.1),
            torch.nn.Linear(hidden_dim, output_dim),
        )

    def forward(self, x):
        """x: [batch_size, n_mels, time] -> logits [batch_size, output_dim]."""
        encoded = self.encoder(x)
        pooled = encoded.mean(dim=2)  # collapse the time axis
        return self.decoder(pooled)

# Training function
def train(model, train_loader, optimizer, criterion, device):
    """Run one training epoch and return the mean per-batch loss."""
    model.train()
    running_loss = 0.0

    for batch_features, batch_targets in tqdm(train_loader, desc="Training"):
        batch_features = batch_features.to(device)
        batch_targets = batch_targets.to(device)

        # Forward pass.
        logits = model(batch_features)
        loss = criterion(logits, batch_targets)

        # Backward pass and parameter update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        running_loss += loss.item()

    return running_loss / len(train_loader)

# Validation function
def validate(model, val_loader, criterion, device):
    """Evaluate the model on the validation set; returns the mean batch loss."""
    model.eval()
    batch_losses = []

    with torch.no_grad():
        for batch_features, batch_targets in tqdm(val_loader, desc="Validation"):
            batch_features = batch_features.to(device)
            batch_targets = batch_targets.to(device)

            # Forward pass only — no gradients are tracked here.
            logits = model(batch_features)
            batch_losses.append(criterion(logits, batch_targets).item())

    return sum(batch_losses) / len(val_loader)

# 计算字符错误率
def calculate_cer(predictions, targets, id_to_char):
    """
    计算字符错误率
    """
    total_chars = 0
    total_errors = 0
    
    for pred, target in zip(predictions, targets):
        pred_text = ''.join([id_to_char.get(id.item(), '') for id in pred])
        target_text = ''.join([id_to_char.get(id.item(), '') for id in target])
        
        # 计算编辑距离
        m, n = len(pred_text), len(target_text)
        dp = [[0] * (n + 1) for _ in range(m + 1)]
        
        for i in range(m + 1):
            dp[i][0] = i
        for j in range(n + 1):
            dp[0][j] = j
        
        for i in range(1, m + 1):
            for j in range(1, n + 1):
                if pred_text[i-1] == target_text[j-1]:
                    dp[i][j] = dp[i-1][j-1]
                else:
                    dp[i][j] = min(dp[i-1][j], dp[i][j-1], dp[i-1][j-1]) + 1
        
        total_errors += dp[m][n]
        total_chars += n
    
    return total_errors / total_chars if total_chars > 0 else 1.0

# Main entry point
def main():
    """Train the model, track loss/CER per epoch, save checkpoints and curves."""
    # Load the train and dev splits.
    train_dataset = AIshellDataset(args.data_dir, split='train')
    val_dataset = AIshellDataset(args.data_dir, split='dev')
    
    # NOTE(review): each utterance yields a different feature length and
    # transcript length, and the default collate_fn can only stack
    # equal-sized tensors — batching is expected to fail without a custom
    # padding collate_fn. TODO confirm with a real data run.
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=4)
    val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=4)
    
    # Build the model on the selected device.
    model = SimpleWeNetModel().to(args.device)
    
    # Loss, optimizer and LR schedule.
    # NOTE(review): CrossEntropyLoss expects one class index per sample,
    # but the dataset returns a whole id sequence per utterance — the
    # shapes do not line up as written. Verify the intended target format
    # (a CTC-style loss may have been intended).
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = Adam(model.parameters(), lr=args.lr)
    scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=2)
    
    # Per-epoch history for the plots below.
    train_losses = []
    val_losses = []
    train_cers = []
    val_cers = []
    
    for epoch in range(args.epochs):
        print(f"Epoch {epoch+1}/{args.epochs}")
        
        # One pass over the training data.
        train_loss = train(model, train_loader, optimizer, criterion, args.device)
        train_losses.append(train_loss)
        
        # Evaluate on the dev split.
        val_loss = validate(model, val_loader, criterion, args.device)
        val_losses.append(val_loss)
        
        # Reduce the learning rate when validation loss plateaus.
        scheduler.step(val_loss)
        
        # Estimate CER on a sample of batches.
        # NOTE(review): argmax over dim=1 yields ONE id per utterance while
        # the reference is a full transcript, so the CER computed below is
        # not meaningful as written — flagging rather than changing here.
        model.eval()
        train_predictions = []
        train_targets = []
        val_predictions = []
        val_targets = []
        
        with torch.no_grad():
            # Sample a few batches to keep the CER estimate cheap.
            for i, (features, targets) in enumerate(train_loader):
                if i >= 10:  # use only a subset of the data for CER
                    break
                features = features.to(args.device)
                outputs = model(features)
                predictions = torch.argmax(outputs, dim=1)
                train_predictions.append(predictions)
                train_targets.append(targets)
            
            for i, (features, targets) in enumerate(val_loader):
                if i >= 10:  # use only a subset of the data for CER
                    break
                features = features.to(args.device)
                outputs = model(features)
                predictions = torch.argmax(outputs, dim=1)
                val_predictions.append(predictions)
                val_targets.append(targets)
        
        # Build the id -> character decode map.
        # NOTE(review): this iterates an unsorted set, so the mapping is only
        # consistent with AIshellDataset.text_to_ids within a single process
        # (and only for the train split) — fragile across runs; verify before
        # trusting the reported CER.
        id_to_char = {i+1: char for i, char in enumerate(set(''.join([s[1] for s in train_dataset.samples])))}
        id_to_char[0] = ''  # padding
        
        train_cer = calculate_cer(train_predictions, train_targets, id_to_char)
        val_cer = calculate_cer(val_predictions, val_targets, id_to_char)
        
        train_cers.append(train_cer)
        val_cers.append(val_cer)
        
        print(f"Train Loss: {train_loss:.4f}, Val Loss: {val_loss:.4f}")
        print(f"Train CER: {train_cer:.4f}, Val CER: {val_cer:.4f}")
        
        # Checkpoint after every epoch.
        torch.save(model.state_dict(), os.path.join(args.output_dir, f'wenet_epoch_{epoch+1}.pt'))
    
    # Save the final model weights.
    torch.save(model.state_dict(), os.path.join(args.output_dir, 'wenet_final.pt'))
    
    # Plot the loss and CER curves side by side.
    plt.figure(figsize=(12, 5))
    
    plt.subplot(1, 2, 1)
    plt.plot(train_losses, label='Train Loss')
    plt.plot(val_losses, label='Val Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
    plt.title('Training and Validation Loss')
    
    plt.subplot(1, 2, 2)
    plt.plot(train_cers, label='Train CER')
    plt.plot(val_cers, label='Val CER')
    plt.xlabel('Epoch')
    plt.ylabel('CER')
    plt.legend()
    plt.title('Training and Validation CER')
    
    plt.tight_layout()
    plt.savefig(os.path.join(args.output_dir, 'training_curves.png'))
    plt.close()
    
    print("Training completed!")

if __name__ == "__main__":
    main()
