"""
训练脚本 - 训练频谱分析Transformer模型
"""

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import numpy as np
from tqdm import tqdm
import os
import json
from typing import Dict, List, Tuple
import matplotlib.pyplot as plt

from data_generator import SpectrumDataGenerator, SignalLabel
from spectrum_transformer import SpectrumTransformer, SpectrumTransformerLoss


class SpectrumDataset(Dataset):
    """Dataset pairing precomputed spectra with fixed-size label tensors.

    Each sample is a (spectrum, target-dict) pair; variable-length label
    fields are zero-padded to fixed shapes so they can be batch-stacked.
    """

    def __init__(self, spectra: np.ndarray, labels: List[SignalLabel], max_freq: float = 2560.0):
        self.spectra = torch.FloatTensor(spectra)
        self.labels = labels
        self.max_freq = max_freq

    def __len__(self):
        return len(self.spectra)

    def __getitem__(self, idx):
        # Encode the label lazily, per access, into the dict the loss expects.
        return self.spectra[idx], self._label_to_tensor(self.labels[idx])

    def _label_to_tensor(self, label: SignalLabel) -> Dict[str, torch.Tensor]:
        """Encode a SignalLabel as a dict of fixed-size tensors.

        Shapes (per sample, before batch stacking):
          - binary flags: (1,)
          - counts: scalar long
          - harmonic_cluster_params: (3, 2) = (base_freq, num_harmonics), zero-padded
          - modulation_params: (2, 3) = (carrier, mod_freq, num_sidebands), zero-padded
        """
        harmonic_flag = torch.FloatTensor([1.0 if label.has_harmonic else 0.0])

        # Cluster count is forced to 0 when the harmonic flag is off.
        cluster_count = torch.tensor(
            len(label.harmonic_clusters) if label.has_harmonic else 0,
            dtype=torch.long,
        )

        # At most 3 clusters are encoded; extras are silently dropped.
        cluster_params = torch.zeros(3, 2)
        for row, cluster in enumerate(label.harmonic_clusters[:3]):
            cluster_params[row, 0] = cluster['base_freq']
            cluster_params[row, 1] = cluster['num_harmonics']

        modulation_flag = torch.FloatTensor([1.0 if label.has_modulation else 0.0])

        modulation_count = torch.tensor(
            len(label.modulation_info) if label.has_modulation else 0,
            dtype=torch.long,
        )

        # At most 2 modulations are encoded; extras are silently dropped.
        mod_params = torch.zeros(2, 3)
        for row, mod in enumerate(label.modulation_info[:2]):
            mod_params[row, 0] = mod['carrier_freq']
            mod_params[row, 1] = mod['modulation_freq']
            mod_params[row, 2] = mod['num_sidebands']

        bearing_flag = torch.FloatTensor([1.0 if label.has_bearing_fault else 0.0])

        return {
            'has_harmonic': harmonic_flag,
            'num_harmonic_clusters': cluster_count,
            'harmonic_cluster_params': cluster_params,
            'has_modulation': modulation_flag,
            'num_modulations': modulation_count,
            'modulation_params': mod_params,
            'has_bearing_fault': bearing_flag
        }


def collate_fn(batch):
    """Collate (spectrum, target-dict) samples into batched tensors.

    Spectra are stacked along a new batch dimension; every target field is
    stacked independently under its original key.
    """
    spectra = torch.stack([sample[0] for sample in batch])

    # All samples share the same target keys, so the first sample's keys
    # are representative for the whole batch.
    target_keys = batch[0][1].keys()
    targets = {
        key: torch.stack([sample[1][key] for sample in batch])
        for key in target_keys
    }

    return spectra, targets


def generate_datasets(train_size: int = 10000, 
                     val_size: int = 2000,
                     test_size: int = 1000,
                     save_dir: str = 'datasets'):
    """Generate and save the train / validation / test datasets.

    Each split is produced by SpectrumDataGenerator and written as a
    compressed .npz containing the spectra array and the labels
    serialized via ``SignalLabel.to_dict()``.

    Args:
        train_size: Number of training samples.
        val_size: Number of validation samples.
        test_size: Number of test samples.
        save_dir: Output directory (created if missing).

    Returns:
        Tuple of file paths: (train_file, val_file, test_file).
    """
    
    os.makedirs(save_dir, exist_ok=True)
    
    print("开始生成数据集...")
    generator = SpectrumDataGenerator(fs=5120, n_samples=16384)
    
    def _generate_split(split_name: str, filename: str, size: int) -> str:
        # Generate one split, save it compressed, report, and return its path.
        # Console output is identical to the previous per-split copies.
        print(f"\n生成{split_name} ({size} 样本)...")
        spectra, labels = generator.generate_batch(size)
        filepath = os.path.join(save_dir, filename)
        np.savez_compressed(filepath,
                            spectra=spectra,
                            labels=[label.to_dict() for label in labels])
        print(f"{split_name}已保存: {filepath}")
        return filepath
    
    train_file = _generate_split('训练集', 'train_data.npz', train_size)
    val_file = _generate_split('验证集', 'val_data.npz', val_size)
    test_file = _generate_split('测试集', 'test_data.npz', test_size)
    
    return train_file, val_file, test_file


def load_data_from_file(filepath: str) -> Tuple[np.ndarray, List[SignalLabel]]:
    """Load spectra and labels from a compressed ``.npz`` dataset file.

    Args:
        filepath: Path to a file written by ``generate_datasets`` (keys
            'spectra' and 'labels', the latter a pickled list of dicts).

    Returns:
        Tuple of (spectra array, list of reconstructed SignalLabel objects).
    """
    # np.load on an .npz returns an NpzFile that keeps the underlying file
    # handle open; use it as a context manager so the handle is closed even
    # if label reconstruction raises.
    with np.load(filepath, allow_pickle=True) as data:
        spectra = data['spectra']
        labels = [SignalLabel(**label_dict) for label_dict in data['labels']]
    return spectra, labels


def train_epoch(model, dataloader, criterion, optimizer, device):
    """Run one optimization pass over the dataloader.

    Returns:
        (avg_loss, avg_loss_dict): mean total loss over batches and the
        per-component losses averaged the same way.
    """
    model.train()
    running_total = 0.0
    component_sums = {}

    progress = tqdm(dataloader, desc='Training')
    for spectra, targets in progress:
        spectra = spectra.to(device)
        targets = {name: tensor.to(device) for name, tensor in targets.items()}

        # Forward pass and multi-task loss (criterion returns total + parts).
        predictions = model(spectra)
        loss, components = criterion(predictions, targets)

        # Standard backward/step cycle.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Accumulate the scalar losses for epoch-level averaging.
        batch_loss = loss.item()
        running_total += batch_loss
        for name, value in components.items():
            component_sums[name] = component_sums.get(name, 0) + value

        progress.set_postfix({'loss': f'{batch_loss:.4f}'})

    n_batches = len(dataloader)
    avg_components = {name: total / n_batches for name, total in component_sums.items()}
    return running_total / n_batches, avg_components


def validate(model, dataloader, criterion, device):
    """Evaluate the model over the dataloader without gradient updates.

    Returns:
        (avg_loss, avg_loss_dict): same averaging scheme as train_epoch.
    """
    model.eval()
    running_total = 0.0
    component_sums = {}

    # No gradients needed for evaluation.
    with torch.no_grad():
        progress = tqdm(dataloader, desc='Validation')
        for spectra, targets in progress:
            spectra = spectra.to(device)
            targets = {name: tensor.to(device) for name, tensor in targets.items()}

            predictions = model(spectra)
            loss, components = criterion(predictions, targets)

            batch_loss = loss.item()
            running_total += batch_loss
            for name, value in components.items():
                component_sums[name] = component_sums.get(name, 0) + value

            progress.set_postfix({'loss': f'{batch_loss:.4f}'})

    n_batches = len(dataloader)
    avg_components = {name: total / n_batches for name, total in component_sums.items()}
    return running_total / n_batches, avg_components


def train_model(train_file: str,
                val_file: str,
                num_epochs: int = 50,
                batch_size: int = 32,
                learning_rate: float = 1e-4,
                save_dir: str = 'checkpoints'):
    """Train a SpectrumTransformer on pre-generated datasets.

    Loads the train/val .npz files, builds loaders, trains with AdamW and a
    cosine LR schedule, checkpoints the best model (by validation loss) and
    every 10th epoch, then saves the final model, a JSON history, and
    loss/LR curve plots — all under ``save_dir``.

    Args:
        train_file: Path to training data produced by generate_datasets().
        val_file: Path to validation data.
        num_epochs: Number of training epochs (also the cosine T_max).
        batch_size: Mini-batch size for both loaders.
        learning_rate: Initial AdamW learning rate.
        save_dir: Directory for checkpoints, history, and plots.

    Returns:
        (model, history): trained model and dict of per-epoch
        train_loss / val_loss / lr lists.
    """
    
    # Create the checkpoint directory
    os.makedirs(save_dir, exist_ok=True)
    
    # Select device (CUDA if available, else CPU)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")
    if torch.cuda.is_available():
        print(f"GPU: {torch.cuda.get_device_name(0)}")
        print(f"显存: {torch.cuda.get_device_properties(0).total_memory / 1024**3:.2f} GB")
    
    # Load data
    print("\n加载数据...")
    train_spectra, train_labels = load_data_from_file(train_file)
    val_spectra, val_labels = load_data_from_file(val_file)
    
    print(f"训练集大小: {len(train_spectra)}")
    print(f"验证集大小: {len(val_spectra)}")
    print(f"频谱长度: {train_spectra.shape[1]}")
    
    # Build datasets and data loaders
    train_dataset = SpectrumDataset(train_spectra, train_labels)
    val_dataset = SpectrumDataset(val_spectra, val_labels)
    
    train_loader = DataLoader(
        train_dataset, 
        batch_size=batch_size, 
        shuffle=True,
        collate_fn=collate_fn,
        num_workers=0  # 0 to avoid multiprocessing worker issues on Windows
    )
    val_loader = DataLoader(
        val_dataset,
        batch_size=batch_size,
        shuffle=False,
        collate_fn=collate_fn,
        num_workers=0
    )
    
    # Build the model
    # NOTE(review): spectrum_len=8192 is hard-coded and presumably must match
    # train_spectra.shape[1] — confirm against the generator settings.
    print("\n创建模型...")
    model = SpectrumTransformer(
        spectrum_len=8192,
        d_model=256,
        nhead=8,
        num_encoder_layers=4,
        dim_feedforward=1024,
        dropout=0.1
    ).to(device)
    
    print(f"模型参数数量: {model.count_parameters():,}")
    
    # Loss, optimizer, and a cosine LR schedule annealing over the full run
    criterion = SpectrumTransformerLoss()
    optimizer = optim.AdamW(model.parameters(), lr=learning_rate, weight_decay=0.01)
    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_epochs)
    
    # Per-epoch training history (serialized to JSON at the end)
    history = {
        'train_loss': [],
        'val_loss': [],
        'lr': []
    }
    
    best_val_loss = float('inf')
    
    # Training loop
    print(f"\n开始训练 ({num_epochs} epochs)...")
    for epoch in range(num_epochs):
        print(f"\nEpoch {epoch+1}/{num_epochs}")
        print("-" * 50)
        
        # Train
        train_loss, train_loss_dict = train_epoch(model, train_loader, criterion, optimizer, device)
        
        # Validate
        val_loss, val_loss_dict = validate(model, val_loader, criterion, device)
        
        # Step the LR schedule once per epoch
        scheduler.step()
        current_lr = optimizer.param_groups[0]['lr']
        
        # Record history
        history['train_loss'].append(train_loss)
        history['val_loss'].append(val_loss)
        history['lr'].append(current_lr)
        
        # Report epoch results
        print(f"\n训练损失: {train_loss:.4f}")
        print(f"验证损失: {val_loss:.4f}")
        print(f"学习率: {current_lr:.6f}")
        
        print("\n详细损失:")
        for key in train_loss_dict.keys():
            if key != 'total':
                print(f"  {key:20s} - 训练: {train_loss_dict[key]:.4f}, 验证: {val_loss_dict[key]:.4f}")
        
        # Save the best model (lowest validation loss so far)
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_model_path = os.path.join(save_dir, 'best_model.pth')
            torch.save({
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'val_loss': val_loss,
            }, best_model_path)
            print(f"\n✓ 保存最佳模型 (验证损失: {val_loss:.4f})")
        
        # Save a periodic checkpoint every 10 epochs
        if (epoch + 1) % 10 == 0:
            checkpoint_path = os.path.join(save_dir, f'checkpoint_epoch_{epoch+1}.pth')
            torch.save({
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'val_loss': val_loss,
            }, checkpoint_path)
            print(f"✓ 保存检查点: {checkpoint_path}")
    
    # Save the final (last-epoch) model
    # NOTE(review): val_loss is only bound inside the loop, so num_epochs == 0
    # would raise NameError here.
    final_model_path = os.path.join(save_dir, 'final_model.pth')
    torch.save({
        'epoch': num_epochs,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'val_loss': val_loss,
    }, final_model_path)
    print(f"\n✓ 保存最终模型: {final_model_path}")
    
    # Save training history as JSON
    history_path = os.path.join(save_dir, 'training_history.json')
    with open(history_path, 'w') as f:
        json.dump(history, f, indent=2)
    print(f"✓ 保存训练历史: {history_path}")
    
    # Plot loss / learning-rate curves
    plot_training_history(history, save_dir)
    
    return model, history


def plot_training_history(history: Dict, save_dir: str):
    """Render loss and learning-rate curves and save them as a PNG.

    Args:
        history: Dict with 'train_loss', 'val_loss', and 'lr' lists,
            one entry per epoch.
        save_dir: Directory where 'training_curves.png' is written.
    """
    # Fonts with CJK glyph coverage so the Chinese labels render correctly,
    # and keep the minus sign displayable with those fonts.
    plt.rcParams['font.sans-serif'] = ['SimHei', 'Microsoft YaHei', 'Arial Unicode MS']
    plt.rcParams['axes.unicode_minus'] = False

    fig, (loss_ax, lr_ax) = plt.subplots(1, 2, figsize=(14, 5))

    epochs = range(1, len(history['train_loss']) + 1)

    # Left panel: training vs. validation loss.
    loss_ax.plot(epochs, history['train_loss'], 'b-', label='训练损失', linewidth=2)
    loss_ax.plot(epochs, history['val_loss'], 'r-', label='验证损失', linewidth=2)
    loss_ax.set_xlabel('Epoch', fontsize=12)
    loss_ax.set_ylabel('损失', fontsize=12)
    loss_ax.set_title('训练和验证损失', fontsize=14, fontweight='bold')
    loss_ax.legend()
    loss_ax.grid(True, alpha=0.3)

    # Right panel: learning-rate schedule on a log scale.
    lr_ax.plot(epochs, history['lr'], 'g-', linewidth=2)
    lr_ax.set_xlabel('Epoch', fontsize=12)
    lr_ax.set_ylabel('学习率', fontsize=12)
    lr_ax.set_title('学习率变化', fontsize=14, fontweight='bold')
    lr_ax.grid(True, alpha=0.3)
    lr_ax.set_yscale('log')

    plt.tight_layout()
    save_path = os.path.join(save_dir, 'training_curves.png')
    plt.savefig(save_path, dpi=150, bbox_inches='tight')
    print(f"✓ 训练曲线已保存: {save_path}")
    plt.close()


if __name__ == "__main__":
    # 生成数据集
    print("=" * 60)
    print("频谱分析Transformer - 训练脚本")
    print("=" * 60)
    
    # 检查是否已有数据集
    train_file = 'datasets/train_data.npz'
    val_file = 'datasets/val_data.npz'
    test_file = 'datasets/test_data.npz'
    
    if not (os.path.exists(train_file) and os.path.exists(val_file) and os.path.exists(test_file)):
        print("\n数据集不存在，开始生成...")
        train_file, val_file, test_file = generate_datasets(
            train_size=10000,
            val_size=2000,
            test_size=1000
        )
    else:
        print("\n使用已存在的数据集")
    
    # 训练模型
    model, history = train_model(
        train_file=train_file,
        val_file=val_file,
        num_epochs=50,
        batch_size=32,
        learning_rate=1e-4
    )
    
    print("\n" + "=" * 60)
    print("训练完成！")
    print("=" * 60)

