"""
三维电磁场预测模型训练脚本
"""

import os
import sys
import argparse
import time
import logging
from datetime import datetime

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import numpy as np
import wandb

# Make the project root importable regardless of the working directory
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from em_generation.models.em_predictor import EMFieldPredictor
from em_generation.models.loss_functions import MHDLoss, MaxwellLoss
from em_generation.data.data_utils import create_dataloader, save_predictions
from em_generation.utils import setup_logging, save_checkpoint, load_checkpoint


def parse_args(argv=None):
    """Parse command-line arguments for training.

    Args:
        argv: Optional list of argument strings. Defaults to ``None``,
            which makes argparse read ``sys.argv[1:]`` as before; passing
            an explicit list makes this function testable without touching
            the process arguments.

    Returns:
        argparse.Namespace holding the full training configuration.

    Note:
        Fixes a syntax error in the original (`default './outputs'` was
        missing the `=` on the --output_dir option, which made the whole
        module unimportable).
    """
    parser = argparse.ArgumentParser(description='训练三维电磁场预测模型')

    # Data options
    parser.add_argument('--data_dir', type=str, default='./data', help='数据目录')
    parser.add_argument('--train_dir', type=str, default='./data/train', help='训练数据目录')
    parser.add_argument('--val_dir', type=str, default='./data/val', help='验证数据目录')
    parser.add_argument('--batch_size', type=int, default=1, help='批次大小')
    parser.add_argument('--num_workers', type=int, default=0, help='数据加载器工作进程数')

    # Model options
    parser.add_argument('--model_type', type=str, default='mhd', choices=['mhd', 'maxwell'], help='模型类型')
    parser.add_argument('--modes', type=int, default=8, help='傅里叶模式数')
    parser.add_argument('--width', type=int, default=32, help='网络宽度')
    parser.add_argument('--n_layers', type=int, default=4, help='网络层数')
    parser.add_argument('--input_dim', type=int, default=7, help='输入维度')
    parser.add_argument('--output_dim', type=int, default=4, help='输出维度')

    # Training options
    parser.add_argument('--epochs', type=int, default=100, help='训练轮数')
    parser.add_argument('--lr', type=float, default=1e-3, help='学习率')
    parser.add_argument('--weight_decay', type=float, default=1e-5, help='权重衰减')
    parser.add_argument('--scheduler', type=str, default='cosine', choices=['none', 'step', 'cosine'], help='学习率调度器')
    parser.add_argument('--step_size', type=int, default=30, help='学习率衰减步长')
    parser.add_argument('--gamma', type=float, default=0.5, help='学习率衰减因子')

    # Loss-function options
    parser.add_argument('--loss_type', type=str, default='l2', choices=['l1', 'l2', 'lp'], help='损失函数类型')
    parser.add_argument('--p', type=float, default=2, help='Lp损失的p值')

    # Physical constants fed to the physics-informed loss
    parser.add_argument('--nu', type=float, default=0.01, help='粘性系数')
    parser.add_argument('--eta', type=float, default=0.01, help='磁扩散系数')
    parser.add_argument('--rho', type=float, default=1.0, help='密度')

    # Loss-term weights
    parser.add_argument('--data_weight', type=float, default=1.0, help='数据损失权重')
    parser.add_argument('--ic_weight', type=float, default=1.0, help='初始条件损失权重')
    parser.add_argument('--pde_weight', type=float, default=0.1, help='PDE损失权重')
    parser.add_argument('--constraint_weight', type=float, default=0.1, help='约束损失权重')

    # Checkpointing and logging
    parser.add_argument('--output_dir', type=str, default='./outputs', help='输出目录')
    parser.add_argument('--checkpoint_dir', type=str, default='./checkpoints', help='检查点目录')
    parser.add_argument('--log_dir', type=str, default='./logs', help='日志目录')
    parser.add_argument('--save_freq', type=int, default=10, help='保存检查点频率')
    parser.add_argument('--eval_freq', type=int, default=5, help='评估频率')
    parser.add_argument('--log_freq', type=int, default=10, help='日志记录频率')

    # Miscellaneous
    parser.add_argument('--device', type=str, default='auto', choices=['auto', 'cpu', 'cuda'], help='计算设备')
    parser.add_argument('--resume', type=str, default=None, help='恢复训练的检查点路径')
    parser.add_argument('--seed', type=int, default=42, help='随机种子')
    parser.add_argument('--use_wandb', action='store_true', help='使用Weights & Biases记录')
    parser.add_argument('--project_name', type=str, default='em_field_prediction', help='项目名称')
    parser.add_argument('--run_name', type=str, default=None, help='运行名称')

    return parser.parse_args(argv)


def setup_device(device):
    """Resolve the requested compute device, auto-detecting CUDA when asked.

    Args:
        device: 'auto', 'cpu', or 'cuda'. 'auto' picks 'cuda' if available.

    Returns:
        The resolved device string ('cpu' or 'cuda', or whatever was passed).
    """
    resolved = device
    if resolved == 'auto':
        resolved = 'cuda' if torch.cuda.is_available() else 'cpu'

    print(f"使用设备: {resolved}")
    return resolved


def create_model(args):
    """Instantiate the EM field predictor from CLI options and report its size.

    Args:
        args: Parsed namespace providing modes/width/n_layers/input_dim/output_dim.

    Returns:
        A freshly constructed EMFieldPredictor (still on CPU).
    """
    model = EMFieldPredictor(
        modes=args.modes,
        width=args.width,
        n_layers=args.n_layers,
        input_dim=args.input_dim,
        output_dim=args.output_dim,
    )

    # Print parameter counts as a quick sanity check on model capacity.
    params = list(model.parameters())
    total_params = sum(p.numel() for p in params)
    trainable_params = sum(p.numel() for p in params if p.requires_grad)

    print(f"模型参数总数: {total_params:,}")
    print(f"可训练参数数: {trainable_params:,}")

    return model


def create_loss_function(args):
    """Select the physics-informed loss matching the configured model type.

    Args:
        args: Parsed namespace; `model_type` chooses MHD vs. Maxwell, the
            remaining fields configure the loss terms and their weights.

    Returns:
        An MHDLoss when args.model_type == 'mhd', otherwise a MaxwellLoss.
    """
    # Keyword arguments common to both loss classes.
    common = dict(
        loss_type=args.loss_type,
        p=args.p,
        data_weight=args.data_weight,
        ic_weight=args.ic_weight,
        pde_weight=args.pde_weight,
        constraint_weight=args.constraint_weight,
    )

    if args.model_type == 'mhd':
        # The MHD loss additionally needs the fluid/magnetic constants.
        return MHDLoss(nu=args.nu, eta=args.eta, rho=args.rho, **common)

    return MaxwellLoss(**common)


def create_optimizer(model, args):
    """Build an Adam optimizer over all model parameters.

    Args:
        model: The network whose parameters will be optimized.
        args: Parsed namespace providing `lr` and `weight_decay`.

    Returns:
        A configured torch.optim.Adam instance.
    """
    return optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)


def create_scheduler(optimizer, args):
    """Build the learning-rate scheduler selected by args.scheduler.

    Args:
        optimizer: The optimizer whose learning rate will be scheduled.
        args: Parsed namespace; `scheduler` is one of 'none'/'step'/'cosine',
            with `step_size`/`gamma` used by 'step' and `epochs` by 'cosine'.

    Returns:
        A StepLR, a CosineAnnealingLR, or None for 'none'.

    Raises:
        ValueError: If args.scheduler is not a recognized choice (argparse
            normally prevents this, but the function can be called directly).
    """
    if args.scheduler == 'none':
        return None
    if args.scheduler == 'step':
        return optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)
    if args.scheduler == 'cosine':
        return optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs)

    raise ValueError(f"Unknown scheduler: {args.scheduler}")


def train_epoch(model, dataloader, loss_fn, optimizer, device, args, epoch):
    """Run one full training pass over `dataloader`.

    Args:
        model: Network in training mode after this call begins.
        dataloader: Iterable yielding (inputs, targets) batch pairs.
        loss_fn: Callable(inputs, outputs, targets) -> dict with keys
            'total_loss', 'data_loss', 'ic_loss', 'pde_loss',
            'constraint_loss' (tensors).
        optimizer: Optimizer stepped once per batch.
        device: Target device for the batch tensors.
        args: Namespace providing `log_freq` and `use_wandb`.
        epoch: Current epoch index, used only for logging.

    Returns:
        Mean of the per-batch total losses over the epoch (float).
    """
    model.train()
    running_loss = 0.0
    n_batches = len(dataloader)

    for step, (inputs, targets) in enumerate(dataloader):
        inputs, targets = inputs.to(device), targets.to(device)

        # Forward pass and composite physics-informed loss.
        preds = model(inputs)
        losses = loss_fn(inputs, preds, targets)
        batch_loss = losses['total_loss']

        # Standard backward/step cycle.
        optimizer.zero_grad()
        batch_loss.backward()
        optimizer.step()

        running_loss += batch_loss.item()

        # Periodic console/wandb logging (always fires on the first batch).
        if step % args.log_freq == 0:
            logging.info(
                f"Epoch {epoch} [{step}/{n_batches}] "
                f"Loss: {batch_loss.item():.6f} "
                f"Data: {losses['data_loss'].item():.6f} "
                f"IC: {losses['ic_loss'].item():.6f} "
                f"PDE: {losses['pde_loss'].item():.6f} "
                f"Constraint: {losses['constraint_loss'].item():.6f}"
            )

            if args.use_wandb:
                wandb.log({
                    "train/batch_loss": batch_loss.item(),
                    "train/data_loss": losses['data_loss'].item(),
                    "train/ic_loss": losses['ic_loss'].item(),
                    "train/pde_loss": losses['pde_loss'].item(),
                    "train/constraint_loss": losses['constraint_loss'].item(),
                    "epoch": epoch,
                    "batch": step
                })

    return running_loss / n_batches


def evaluate(model, dataloader, loss_fn, device, args, epoch):
    """Compute the mean validation loss over `dataloader` without gradients.

    Args:
        model: Network, switched to eval mode here.
        dataloader: Iterable yielding (inputs, targets) batch pairs.
        loss_fn: Callable(inputs, outputs, targets) -> dict; only
            'total_loss' is read here.
        device: Target device for the batch tensors.
        args: Namespace providing `use_wandb`.
        epoch: Current epoch index, used only for logging.

    Returns:
        Mean per-batch total loss (float).
    """
    model.eval()
    running = 0.0
    n_batches = len(dataloader)

    with torch.no_grad():
        for inputs, targets in dataloader:
            inputs, targets = inputs.to(device), targets.to(device)
            preds = model(inputs)
            running += loss_fn(inputs, preds, targets)['total_loss'].item()

    avg_loss = running / n_batches
    logging.info(f"Epoch {epoch} - Validation Loss: {avg_loss:.6f}")

    if args.use_wandb:
        wandb.log({
            "val/loss": avg_loss,
            "epoch": epoch
        })

    return avg_loss


def save_predictions_and_visualizations(model, dataloader, device, args, epoch):
    """Dump model predictions for the first few validation batches to disk.

    Args:
        model: Network, switched to eval mode here.
        dataloader: Iterable yielding (inputs, targets) batch pairs.
        device: Target device for the batch tensors.
        args: Namespace providing `output_dir`.
        epoch: Epoch index used to name the output subdirectory.
    """
    model.eval()

    # One subdirectory per epoch, e.g. <output_dir>/epoch_3.
    epoch_dir = os.path.join(args.output_dir, f'epoch_{epoch}')
    os.makedirs(epoch_dir, exist_ok=True)

    with torch.no_grad():
        for idx, (inputs, targets) in enumerate(dataloader):
            # Cap the dump at the first 5 batches to keep output small.
            if idx >= 5:
                break

            inputs, targets = inputs.to(device), targets.to(device)
            outputs = model(inputs)

            save_predictions(
                inputs, targets, outputs,
                os.path.join(epoch_dir, f'batch_{idx}'),
                sample_idx=0
            )


def main():
    """Entry point: configure the run, then execute the train/eval loop.

    Pipeline: parse args -> seed RNGs -> resolve device -> create output
    directories and logging -> (optionally) init wandb -> build dataloaders,
    model, loss, optimizer, scheduler -> optionally resume from checkpoint ->
    train for args.epochs epochs with periodic evaluation and checkpointing.
    """
    # Parse command-line configuration.
    args = parse_args()
    
    # Seed CPU RNGs for reproducibility. NOTE(review): CUDA kernels may
    # still be nondeterministic; no cudnn.deterministic flag is set here.
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    
    # Resolve 'auto' to cuda/cpu.
    device = setup_device(args.device)
    
    # Create output/checkpoint/log directories (idempotent).
    os.makedirs(args.output_dir, exist_ok=True)
    os.makedirs(args.checkpoint_dir, exist_ok=True)
    os.makedirs(args.log_dir, exist_ok=True)
    
    # Configure logging to a timestamped file in the log directory.
    setup_logging(os.path.join(args.log_dir, f'train_{datetime.now().strftime("%Y%m%d_%H%M%S")}.log'))
    
    # Initialize Weights & Biases if requested; run name defaults to
    # "<model_type>_<timestamp>" when --run_name is not given.
    if args.use_wandb:
        run_name = args.run_name or f"{args.model_type}_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
        wandb.init(
            project=args.project_name,
            name=run_name,
            config=args.__dict__
        )
    
    # Build the train/val dataloaders.
    # NOTE(review): create_dataloader returns a 2-tuple; the second element
    # is discarded here — presumably a dataset or normalizer; confirm in
    # em_generation.data.data_utils.
    logging.info("创建数据加载器...")
    train_dataloader, _ = create_dataloader(
        data_dir=args.train_dir,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.num_workers,
        distributed=False,
        normalize=True,
        train=True
    )
    
    val_dataloader, _ = create_dataloader(
        data_dir=args.val_dir,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.num_workers,
        distributed=False,
        normalize=True,
        train=False
    )
    
    logging.info(f"训练样本数: {len(train_dataloader.dataset)}")
    logging.info(f"验证样本数: {len(val_dataloader.dataset)}")
    
    # Build the model and move it to the selected device.
    logging.info("创建模型...")
    model = create_model(args)
    model = model.to(device)
    
    # Build the physics-informed loss (MHD or Maxwell per --model_type).
    logging.info("创建损失函数...")
    loss_fn = create_loss_function(args)
    
    # Build the Adam optimizer.
    logging.info("创建优化器...")
    optimizer = create_optimizer(model, args)
    
    # Build the LR scheduler (may be None when --scheduler none).
    logging.info("创建学习率调度器...")
    scheduler = create_scheduler(optimizer, args)
    
    # Optionally resume training state (model/optimizer/scheduler/epoch/
    # best validation loss) from a checkpoint file.
    start_epoch = 0
    best_val_loss = float('inf')
    if args.resume:
        logging.info(f"从检查点恢复训练: {args.resume}")
        checkpoint = load_checkpoint(args.resume, device)
        model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        if scheduler is not None and 'scheduler_state_dict' in checkpoint:
            scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
        start_epoch = checkpoint['epoch'] + 1
        best_val_loss = checkpoint.get('best_val_loss', float('inf'))
        logging.info(f"恢复到epoch {start_epoch}, 最佳验证损失: {best_val_loss:.6f}")
    
    # Main training loop.
    logging.info("开始训练...")
    for epoch in range(start_epoch, args.epochs):
        start_time = time.time()
        
        # Train one epoch and get its mean loss.
        train_loss = train_epoch(model, train_dataloader, loss_fn, optimizer, device, args, epoch)
        
        # Advance the LR schedule once per epoch.
        if scheduler is not None:
            scheduler.step()
        
        # Periodic validation (also runs at epoch 0, since 0 % eval_freq == 0).
        if epoch % args.eval_freq == 0:
            val_loss = evaluate(model, val_dataloader, loss_fn, device, args, epoch)
            
            # Track and checkpoint the best model by validation loss.
            if val_loss < best_val_loss:
                best_val_loss = val_loss
                save_checkpoint(
                    model, optimizer, scheduler, epoch, best_val_loss,
                    os.path.join(args.checkpoint_dir, 'best_model.pth')
                )
                logging.info(f"保存最佳模型，验证损失: {best_val_loss:.6f}")
            
            # Dump sample predictions for inspection.
            save_predictions_and_visualizations(model, val_dataloader, device, args, epoch)
        
        # Periodic rolling checkpoint (independent of validation cadence).
        if epoch % args.save_freq == 0:
            save_checkpoint(
                model, optimizer, scheduler, epoch, best_val_loss,
                os.path.join(args.checkpoint_dir, f'checkpoint_epoch_{epoch}.pth')
            )
        
        # Per-epoch timing/loss summary.
        epoch_time = time.time() - start_time
        logging.info(f"Epoch {epoch} 完成，用时: {epoch_time:.2f}s, 训练损失: {train_loss:.6f}")
        
        # Mirror the epoch summary to wandb.
        if args.use_wandb:
            wandb.log({
                "train/epoch_loss": train_loss,
                "epoch_time": epoch_time,
                "epoch": epoch
            })
    
    # Save the final model state after the last epoch.
    save_checkpoint(
        model, optimizer, scheduler, args.epochs - 1, best_val_loss,
        os.path.join(args.checkpoint_dir, 'final_model.pth')
    )
    
    logging.info("训练完成!")
    
    # Finish the wandb run cleanly.
    if args.use_wandb:
        wandb.finish()


# Run training only when executed as a script (not on import).
if __name__ == '__main__':
    main()