# src/train.py

"""
Model training script.

Usage:
    python src/train.py
    or
    cd src && python train.py
"""

import sys
import os
# Prepend the project root (parent of src/) to sys.path so that the
# `from src.xxx import ...` absolute imports below resolve no matter
# which directory the script is launched from.
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import torch
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm
import numpy as np
import yaml

from src.config_loader import load_config
from src.dataset import HRVDataset
from src.model import ChannelIndependentTransformer, HRVHybridTransformer
from src.utils import save_checkpoint
from src.evaluate import evaluate_model


def train_one_epoch_age(model, data_loader, loss_fn, optimizer, device, gradient_clip=None, use_multimodal=False):
    """Run a single training epoch for the age-regression task.

    Args:
        model: Network whose forward pass returns age predictions.
        data_loader: Yields ``(windows, ages, genders)`` batches, or
            ``(windows, tabular_features, ages, genders)`` when
            ``use_multimodal`` is True.
        loss_fn: Regression loss applied to predictions vs. targets.
        optimizer: Optimizer updating ``model``'s parameters.
        device: Device the batch tensors are moved to.
        gradient_clip: Max gradient norm, or None to skip clipping.
        use_multimodal: Whether batches include tabular features.

    Returns:
        Mean loss across all batches of the epoch.
    """
    model.train()
    running_loss = 0.0

    bar = tqdm(data_loader, desc="Training")
    for batch in bar:
        # Unpack depending on whether tabular features are present;
        # gender labels are unused for this task.
        if use_multimodal:
            windows, tabular, targets, _ = batch
            tabular = tabular.to(device)
        else:
            windows, targets, _ = batch
            tabular = None

        windows = windows.to(device)
        targets = targets.to(device).squeeze(-1)

        # Forward pass.
        preds = model(windows, tabular) if use_multimodal else model(windows)
        loss = loss_fn(preds, targets)

        # Backward pass, optional gradient clipping, parameter update.
        optimizer.zero_grad()
        loss.backward()
        if gradient_clip is not None:
            torch.nn.utils.clip_grad_norm_(model.parameters(), gradient_clip)
        optimizer.step()

        # Track the running total and surface the batch loss live.
        running_loss += loss.item()
        bar.set_postfix({'loss': loss.item()})

    return running_loss / len(data_loader)


def train_one_epoch_gender(model, data_loader, loss_fn, optimizer, device, gradient_clip=None, use_multimodal=False):
    """Run a single training epoch for the gender-classification task.

    The model emits raw logits; a sigmoid is applied here so that
    ``loss_fn`` (BCE on probabilities) receives values in [0, 1].

    Args:
        model: Network whose forward pass returns gender logits.
        data_loader: Yields ``(windows, ages, genders)`` batches, or
            ``(windows, tabular_features, ages, genders)`` when
            ``use_multimodal`` is True.
        loss_fn: Binary classification loss on probabilities.
        optimizer: Optimizer updating ``model``'s parameters.
        device: Device the batch tensors are moved to.
        gradient_clip: Max gradient norm, or None to skip clipping.
        use_multimodal: Whether batches include tabular features.

    Returns:
        Mean loss across all batches of the epoch.
    """
    model.train()
    running_loss = 0.0

    bar = tqdm(data_loader, desc="Training")
    for batch in bar:
        # Unpack depending on whether tabular features are present;
        # age labels are unused for this task.
        if use_multimodal:
            windows, tabular, _, targets = batch
            tabular = tabular.to(device)
        else:
            windows, _, targets = batch
            tabular = None

        windows = windows.to(device)
        targets = targets.to(device).squeeze(-1).float()

        # Forward pass; squash logits to probabilities for BCE.
        logits = model(windows, tabular) if use_multimodal else model(windows)
        probs = torch.sigmoid(logits)
        loss = loss_fn(probs, targets)

        # Backward pass, optional gradient clipping, parameter update.
        optimizer.zero_grad()
        loss.backward()
        if gradient_clip is not None:
            torch.nn.utils.clip_grad_norm_(model.parameters(), gradient_clip)
        optimizer.step()

        # Track the running total and surface the batch loss live.
        running_loss += loss.item()
        bar.set_postfix({'loss': loss.item()})

    return running_loss / len(data_loader)


def train_one_epoch_both(model, data_loader, loss_fn_age, loss_fn_gender, optimizer, device, loss_alpha, gradient_clip=None, use_multimodal=False):
    """Run a single training epoch for the joint age + gender task.

    The two task losses are blended as
    ``loss_alpha * age_loss + (1 - loss_alpha) * gender_loss``.

    Args:
        model: Network returning ``(age_preds, gender_logits)``.
        data_loader: Yields ``(windows, ages, genders)`` batches, or
            ``(windows, tabular_features, ages, genders)`` when
            ``use_multimodal`` is True.
        loss_fn_age: Regression loss for the age head.
        loss_fn_gender: Binary classification loss on probabilities.
        optimizer: Optimizer updating ``model``'s parameters.
        device: Device the batch tensors are moved to.
        loss_alpha: Weight of the age loss in the combined objective.
        gradient_clip: Max gradient norm, or None to skip clipping.
        use_multimodal: Whether batches include tabular features.

    Returns:
        Mean combined loss across all batches of the epoch.
    """
    model.train()
    running_loss = 0.0

    bar = tqdm(data_loader, desc="Training")
    for batch in bar:
        # Unpack depending on whether tabular features are present.
        if use_multimodal:
            windows, tabular, age_targets, gender_targets = batch
            tabular = tabular.to(device)
        else:
            windows, age_targets, gender_targets = batch
            tabular = None

        windows = windows.to(device)
        age_targets = age_targets.to(device).squeeze(-1)
        gender_targets = gender_targets.to(device).squeeze(-1).float()

        # Forward pass; the gender head emits logits, squash for BCE.
        if use_multimodal:
            age_preds, gender_logits = model(windows, tabular)
        else:
            age_preds, gender_logits = model(windows)
        gender_probs = torch.sigmoid(gender_logits)

        # Weighted sum of the per-task losses.
        age_loss = loss_fn_age(age_preds, age_targets)
        gender_loss = loss_fn_gender(gender_probs, gender_targets)
        total = loss_alpha * age_loss + (1 - loss_alpha) * gender_loss

        # Backward pass, optional gradient clipping, parameter update.
        optimizer.zero_grad()
        total.backward()
        if gradient_clip is not None:
            torch.nn.utils.clip_grad_norm_(model.parameters(), gradient_clip)
        optimizer.step()

        # Track the running total and surface the batch loss live.
        running_loss += total.item()
        bar.set_postfix({'loss': total.item()})

    return running_loss / len(data_loader)


def main():
    """
    Main training entry point.

    Loads the project config, builds datasets, model, loss, optimizer and
    LR scheduler from it, then runs the epoch loop with validation,
    best-checkpoint saving and early stopping.

    Fix vs. previous version: removed ``verbose=True`` from
    ``ReduceLROnPlateau`` — the kwarg was deprecated in PyTorch 2.2 and
    removed in later releases, so passing it raises ``TypeError`` on
    modern torch. The current LR is already printed manually each epoch.
    """
    # Load configuration (project loader; presumably reads a YAML file).
    cfg = load_config()

    # Task ('age' / 'gender' / 'both'), backbone and multimodal switches.
    task_type = cfg.get('task', {}).get('type', 'both')
    model_type = cfg.get('model', {}).get('type', 'hybrid')  # 'hybrid' or 'channel_independent'
    use_multimodal = cfg.get('multimodal', {}).get('enabled', True)

    # Print the run configuration.
    print("\n" + "="*70)
    print(f"HRV Transformer 训练")
    print(f"  任务类型: {task_type.upper()}")
    print(f"  模型类型: {model_type.upper()}")
    print(f"  多模态: {'启用' if use_multimodal else '禁用'}")
    print("="*70)
    print(yaml.dump(cfg, allow_unicode=True, default_flow_style=False))

    # Select the compute device.
    device = torch.device(cfg['device'])
    print(f"\n🖥️  使用设备: {device}")

    if device.type == 'cuda':
        print(f"   GPU: {torch.cuda.get_device_name(0)}")
        print(f"   显存: {torch.cuda.get_device_properties(0).total_memory / 1e9:.2f} GB")

    # Build the data loaders.
    print(f"\n" + "="*70)
    print("加载数据集")
    print("="*70)

    batch_size = cfg['training']['batch_size']
    num_workers = cfg['training']['num_workers']

    train_dataset = HRVDataset(split='train', cfg=cfg, use_multimodal=use_multimodal)
    val_dataset = HRVDataset(split='val', cfg=cfg, use_multimodal=use_multimodal)

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers,
        # pin_memory speeds up host-to-GPU transfers; irrelevant on CPU.
        pin_memory=device.type == 'cuda',
        # Keep worker processes alive between epochs when workers are used.
        persistent_workers=num_workers > 0
    )

    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
        pin_memory=device.type == 'cuda',
        persistent_workers=num_workers > 0
    )

    print(f"\n✅ 数据加载完成")
    print(f"   训练集: {len(train_dataset)} 个样本, {len(train_loader)} 个批次")
    print(f"   验证集: {len(val_dataset)} 个样本, {len(val_loader)} 个批次")

    # Build the model.
    print(f"\n" + "="*70)
    print("初始化模型")
    print("="*70)

    if model_type == 'hybrid':
        model = HRVHybridTransformer(cfg=cfg, task_type=task_type, use_multimodal=use_multimodal)
        print(f"  使用模型: HRVHybridTransformer (CNN + Transformer混合架构)")
    else:
        model = ChannelIndependentTransformer(cfg=cfg, task_type=task_type)
        print(f"  使用模型: ChannelIndependentTransformer (纯Transformer)")

    model = model.to(device)

    # Parameter statistics.
    total_params = sum(p.numel() for p in model.parameters())
    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)

    print(f"\n📊 模型信息:")
    print(f"  - 任务类型: {task_type}")
    print(f"  - 总参数量: {total_params:,}")
    print(f"  - 可训练参数: {trainable_params:,}")
    print(f"  - 模型大小: {total_params * 4 / 1024 / 1024:.2f} MB (float32)")

    # Build the loss function(s). Note: gender uses sigmoid + BCELoss in
    # the train loops (probabilities, not logits).
    if task_type == 'age':
        loss_fn_age = nn.MSELoss()
    elif task_type == 'gender':
        loss_fn_gender = nn.BCELoss()
    else:  # 'both'
        loss_fn_age = nn.MSELoss()
        loss_fn_gender = nn.BCELoss()
        loss_alpha = cfg['training']['loss_alpha']

    # Optimizer.
    optimizer = optim.AdamW(
        model.parameters(),
        lr=cfg['training']['learning_rate'],
        weight_decay=cfg['training']['weight_decay']
    )

    # LR scheduler. `verbose` intentionally NOT passed: deprecated in
    # torch 2.2 and removed later; the LR is printed manually per epoch.
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(
        optimizer,
        mode='min',
        factor=cfg['training']['lr_scheduler']['factor'],
        patience=cfg['training']['lr_scheduler']['patience'],
        min_lr=cfg['training']['lr_scheduler']['min_lr']
    )

    # Training hyperparameters.
    epochs = cfg['training']['epochs']
    gradient_clip = cfg['training']['gradient_clip']
    early_stopping_patience = cfg['training']['early_stopping']['patience']
    min_delta = cfg['training']['early_stopping']['min_delta']

    # Early-stopping state.
    best_val_loss = float('inf')
    epochs_no_improve = 0

    # Checkpoint path.
    model_save_dir = cfg['output']['model_save_dir']
    os.makedirs(model_save_dir, exist_ok=True)
    model_save_path = os.path.join(model_save_dir, f"best_model_{task_type}.pth")

    # Training loop.
    print(f"\n" + "="*70)
    print("开始训练")
    print("="*70)

    for epoch in range(epochs):
        print(f"\nEpoch {epoch+1}/{epochs}")
        print("-" * 70)

        # Train one epoch with the loop matching the task type.
        if task_type == 'age':
            train_loss = train_one_epoch_age(
                model, train_loader, loss_fn_age, optimizer, device, gradient_clip, use_multimodal
            )
        elif task_type == 'gender':
            train_loss = train_one_epoch_gender(
                model, train_loader, loss_fn_gender, optimizer, device, gradient_clip, use_multimodal
            )
        else:  # 'both'
            train_loss = train_one_epoch_both(
                model, train_loader, loss_fn_age, loss_fn_gender,
                optimizer, device, loss_alpha, gradient_clip, use_multimodal
            )

        # Validation (loss_alpha is only defined / needed for 'both').
        val_metrics = evaluate_model(
            model=model,
            data_loader=val_loader,
            device=device,
            task_type=task_type,
            loss_alpha=loss_alpha if task_type == 'both' else None,
            verbose=False,
            use_multimodal=use_multimodal
        )

        val_loss = val_metrics['loss']

        # Report epoch results.
        print(f"\n📈 Epoch {epoch+1} 结果:")
        print(f"  训练Loss:     {train_loss:.4f}")
        print(f"  验证Loss:     {val_loss:.4f}")

        if task_type in ['age', 'both']:
            print(f"  年龄MAE:      {val_metrics['age_mae']:.2f} 岁")
            print(f"  年龄RMSE:     {val_metrics['age_rmse']:.2f} 岁")
            print(f"  年龄R²:       {val_metrics['age_r2']:.4f}")

        if task_type in ['gender', 'both']:
            print(f"  性别准确率:   {val_metrics['gender_accuracy']:.4f} ({val_metrics['gender_accuracy']*100:.2f}%)")
            print(f"  性别F1:       {val_metrics['gender_f1']:.4f}")
            print(f"  性别AUC:      {val_metrics['gender_auc']:.4f}")

        # Step the scheduler on the validation loss and show the LR.
        scheduler.step(val_loss)
        current_lr = optimizer.param_groups[0]['lr']
        print(f"  当前学习率:   {current_lr:.6f}")

        # Early-stopping bookkeeping: improvement must beat min_delta.
        if val_loss < best_val_loss - min_delta:
            best_val_loss = val_loss
            epochs_no_improve = 0

            # Persist the best model seen so far.
            save_checkpoint(model.state_dict(), model_save_path)
            print(f"  \n  ✅ 最佳模型已保存到: {model_save_path}")
        else:
            epochs_no_improve += 1
            print(f"  ⚠️  验证loss未改进 ({epochs_no_improve}/{early_stopping_patience})")

        # Stop when patience is exhausted.
        if epochs_no_improve >= early_stopping_patience:
            print(f"\n⛔ 早停触发！已连续 {early_stopping_patience} 个epoch验证loss未改进")
            print(f"   最佳验证Loss: {best_val_loss:.4f}")
            break

    print(f"\n" + "="*70)
    print("训练完成！")
    print("="*70)
    print(f"✅ 最佳模型保存在: {model_save_path}")
    print(f"✅ 最佳验证Loss: {best_val_loss:.4f}")

    print(f"\n💡 下一步:")
    print(f"   python src/evaluate.py --split test --checkpoint {model_save_path}")


if __name__ == "__main__":
    main()
