"""
三维电磁场预测模型推理脚本
"""

import os
import sys
import argparse
import time
import logging
from datetime import datetime

import torch
import numpy as np
import h5py
import matplotlib.pyplot as plt

# 添加项目路径
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from em_generation.models.em_predictor import EMFieldPredictor
from em_generation.data.data_utils import create_dataloader, save_predictions, visualize_field
from em_generation.utils import setup_logging, load_checkpoint


def _str_to_bool(value):
    """Convert a command-line string to a bool.

    Accepts common spellings ('true'/'false', 'yes'/'no', '1'/'0',
    case-insensitive). Raises argparse.ArgumentTypeError otherwise so
    argparse reports a clean usage error.
    """
    if isinstance(value, bool):
        return value
    lowered = value.strip().lower()
    if lowered in ('true', 't', 'yes', 'y', '1'):
        return True
    if lowered in ('false', 'f', 'no', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError(f'invalid boolean value: {value!r}')


def parse_args(argv=None):
    """Parse command-line arguments for the inference script.

    Args:
        argv: Optional list of argument strings. Defaults to None, in
            which case argparse reads sys.argv[1:] (backward-compatible
            with the previous zero-argument call); passing a list makes
            this function unit-testable.

    Returns:
        argparse.Namespace holding all inference options.
    """
    parser = argparse.ArgumentParser(description='三维电磁场预测模型推理')

    # Model parameters
    parser.add_argument('--model_path', type=str, required=True, help='模型检查点路径')
    parser.add_argument('--model_type', type=str, default='mhd', choices=['mhd', 'maxwell'], help='模型类型')
    parser.add_argument('--modes', type=int, default=8, help='傅里叶模式数')
    parser.add_argument('--width', type=int, default=32, help='网络宽度')
    parser.add_argument('--n_layers', type=int, default=4, help='网络层数')
    parser.add_argument('--input_dim', type=int, default=7, help='输入维度')
    parser.add_argument('--output_dim', type=int, default=4, help='输出维度')

    # Data parameters
    parser.add_argument('--data_dir', type=str, required=True, help='数据目录')
    parser.add_argument('--batch_size', type=int, default=1, help='批次大小')
    parser.add_argument('--num_workers', type=int, default=0, help='数据加载器工作进程数')
    # BUG FIX: the original used type=bool, which is broken with argparse —
    # bool('False') is True, so '--normalize False' silently enabled
    # normalization. Use an explicit string-to-bool converter instead.
    parser.add_argument('--normalize', type=_str_to_bool, default=True, help='是否归一化数据')

    # Output parameters
    parser.add_argument('--output_dir', type=str, default='./inference_results', help='输出目录')
    parser.add_argument('--save_predictions', action='store_true', help='保存预测结果')
    parser.add_argument('--visualize', action='store_true', help='可视化结果')
    parser.add_argument('--num_samples', type=int, default=5, help='处理的样本数量')

    # Miscellaneous
    parser.add_argument('--device', type=str, default='auto', choices=['auto', 'cpu', 'cuda'], help='计算设备')
    parser.add_argument('--log_dir', type=str, default='./logs', help='日志目录')

    return parser.parse_args(argv)


def setup_device(device):
    """Resolve the compute device string.

    Args:
        device: One of 'auto', 'cpu', 'cuda'. 'auto' picks 'cuda' when
            a CUDA device is available, otherwise 'cpu'.

    Returns:
        The resolved device string ('cpu' or 'cuda').
    """
    if device != 'auto':
        resolved = device
    else:
        resolved = 'cuda' if torch.cuda.is_available() else 'cpu'

    print(f"使用设备: {resolved}")
    return resolved


def load_model(model_path, args, device):
    """Build an EMFieldPredictor, restore its weights, and prepare it for inference.

    Args:
        model_path: Path to the checkpoint file.
        args: Parsed CLI namespace supplying the model hyperparameters
            (modes, width, n_layers, input_dim, output_dim).
        device: Target device string ('cpu' or 'cuda').

    Returns:
        The model in eval mode, moved to the requested device.
    """
    net = EMFieldPredictor(
        modes=args.modes,
        width=args.width,
        n_layers=args.n_layers,
        input_dim=args.input_dim,
        output_dim=args.output_dim,
    )

    # Restore weights; checkpoint also carries optional training metadata.
    ckpt = load_checkpoint(model_path, device)
    net.load_state_dict(ckpt['model_state_dict'])

    trained_epochs = ckpt.get('epoch', 0)
    best_val = ckpt.get('best_val_loss', float('inf'))

    print(f"加载模型: {model_path}")
    print(f"训练轮数: {trained_epochs}")
    print(f"验证损失: {best_val:.6f}")

    # Report parameter counts for a quick sanity check of the architecture.
    n_total = sum(p.numel() for p in net.parameters())
    n_trainable = sum(p.numel() for p in net.parameters() if p.requires_grad)

    print(f"模型参数总数: {n_total:,}")
    print(f"可训练参数数: {n_trainable:,}")

    net = net.to(device)
    net.eval()

    return net


def inference_single_sample(model, sample, device):
    """Run the model on one (input, target) pair and collect metrics.

    Args:
        model: A torch module already loaded with weights.
        sample: Sequence whose first element is the input tensor and
            second element is the target tensor (both without a batch dim).
        device: Device string to run inference on.

    Returns:
        Dict with the batch-stripped CPU tensors ('inputs', 'targets',
        'outputs'), wall-clock 'inference_time' in seconds, and scalar
        'mse_error' / 'mae_error'.
    """
    model.eval()

    with torch.no_grad():
        # Add a batch dimension and move to the compute device.
        batch_in = sample[0].unsqueeze(0).to(device)
        batch_tgt = sample[1].unsqueeze(0).to(device)

        # Time only the forward pass.
        t0 = time.time()
        prediction = model(batch_in)
        elapsed = time.time() - t0

        # Scalar error metrics against the ground truth.
        diff = prediction - batch_tgt
        mse = diff.pow(2).mean().item()
        mae = diff.abs().mean().item()

        return {
            'inputs': batch_in.squeeze(0).cpu(),
            'targets': batch_tgt.squeeze(0).cpu(),
            'outputs': prediction.squeeze(0).cpu(),
            'inference_time': elapsed,
            'mse_error': mse,
            'mae_error': mae,
        }


def compute_field_statistics(predictions, targets):
    """Compute per-component error statistics for the predicted fields.

    The last axis of both tensors is interpreted as the four field
    components (u, v, Bx, By).

    Args:
        predictions: Predicted field tensor, shape (..., 4).
        targets: Ground-truth field tensor, same shape as predictions.

    Returns:
        Dict mapping component name to a dict of scalar statistics
        (mean/std of prediction and target, mean/max relative error, RMSE).
    """
    eps = 1e-8  # avoid division by zero where the target vanishes
    relative = torch.abs(predictions - targets) / (torch.abs(targets) + eps)

    stats = {}
    for idx, name in enumerate(['u', 'v', 'Bx', 'By']):
        pred = predictions[..., idx]
        true = targets[..., idx]
        rel = relative[..., idx]

        stats[name] = {
            'mean_pred': pred.mean().item(),
            'std_pred': pred.std().item(),
            'mean_target': true.mean().item(),
            'std_target': true.std().item(),
            'mean_rel_error': rel.mean().item(),
            'max_rel_error': rel.max().item(),
            'rmse': (pred - true).pow(2).mean().sqrt().item(),
        }

    return stats


def visualize_comparison(results, save_path=None):
    """Plot predicted vs. true field components and their absolute error.

    Shows a 3x4 grid (rows: true / predicted / |error|; columns: the four
    field components u, v, Bx, By) for the middle time step and middle
    z-slice.

    Args:
        results: Dict from inference_single_sample with 'targets' and
            'outputs' tensors of shape (nt, nx, ny, nz, 4).
        save_path: Optional path; when given, the figure is saved as PNG.
    """
    # NOTE: 'inputs' from results was unused here and has been dropped.
    targets = results['targets']
    outputs = results['outputs']

    # Dimensions: time, x, y, z, component count.
    nt, nx, ny, nz, n_comp = targets.shape

    # Middle time step and middle z-slice for a representative 2D view.
    t_idx = nt // 2
    z_idx = nz // 2

    fig, axes = plt.subplots(3, 4, figsize=(16, 12))

    components = ['u', 'v', 'Bx', 'By']
    for i, comp in enumerate(components):
        # Row 0: ground truth
        im = axes[0, i].imshow(
            targets[t_idx, :, :, z_idx, i].numpy(),
            origin='lower',
            cmap='viridis'
        )
        axes[0, i].set_title(f'True {comp}')
        plt.colorbar(im, ax=axes[0, i])

        # Row 1: prediction
        im = axes[1, i].imshow(
            outputs[t_idx, :, :, z_idx, i].numpy(),
            origin='lower',
            cmap='viridis'
        )
        axes[1, i].set_title(f'Pred {comp}')
        plt.colorbar(im, ax=axes[1, i])

        # Row 2: absolute error
        error = np.abs(targets[t_idx, :, :, z_idx, i].numpy() - outputs[t_idx, :, :, z_idx, i].numpy())
        im = axes[2, i].imshow(
            error,
            origin='lower',
            cmap='hot'
        )
        axes[2, i].set_title(f'Error {comp}')
        plt.colorbar(im, ax=axes[2, i])

    plt.tight_layout()

    if save_path:
        plt.savefig(save_path, dpi=300, bbox_inches='tight')

    plt.show()
    # BUG FIX: close the figure so repeated calls in the inference loop
    # do not accumulate open figures (matplotlib keeps them alive).
    plt.close(fig)


def visualize_time_evolution(results, save_path=None):
    """Plot true vs. predicted time series of each component at the domain center.

    Args:
        results: Dict from inference_single_sample with 'inputs',
            'targets' and 'outputs' tensors of shape (nt, nx, ny, nz, C).
        save_path: Optional path; when given, the figure is saved as PNG.
    """
    inputs = results['inputs']
    targets = results['targets']
    outputs = results['outputs']

    # Dimensions: time, x, y, z, component count.
    nt, nx, ny, nz, n_comp = targets.shape

    # Center point of the spatial grid.
    x_idx, y_idx, z_idx = nx // 2, ny // 2, nz // 2

    fig, axes = plt.subplots(2, 2, figsize=(12, 8))

    components = ['u', 'v', 'Bx', 'By']
    for i, ax in enumerate(axes.flat):
        if i < n_comp:
            # Time series at the center point for component i.
            target_series = targets[:, x_idx, y_idx, z_idx, i].numpy()
            output_series = outputs[:, x_idx, y_idx, z_idx, i].numpy()
            # Assumes input channel 0 carries the time coordinate,
            # constant over space — TODO confirm against data pipeline.
            time_points = inputs[:, 0, 0, 0, 0].numpy()

            ax.plot(time_points, target_series, 'b-', label='True')
            ax.plot(time_points, output_series, 'r--', label='Pred')
            ax.set_xlabel('Time')
            ax.set_ylabel(f'{components[i]}')
            ax.set_title(f'Time Evolution of {components[i]}')
            ax.legend()
            ax.grid(True)

    plt.tight_layout()

    if save_path:
        plt.savefig(save_path, dpi=300, bbox_inches='tight')

    plt.show()
    # BUG FIX: close the figure so repeated calls in the inference loop
    # do not accumulate open figures (matplotlib keeps them alive).
    plt.close(fig)


def main():
    """Run the full inference pipeline.

    Steps: parse CLI args, resolve the device, set up logging, load the
    model, run inference over up to --num_samples samples, optionally
    save/visualize each result, then aggregate and persist statistics.
    """
    args = parse_args()

    device = setup_device(args.device)

    # Ensure output and log directories exist.
    os.makedirs(args.output_dir, exist_ok=True)
    os.makedirs(args.log_dir, exist_ok=True)

    # Timestamped log file per run.
    setup_logging(os.path.join(args.log_dir, f'inference_{datetime.now().strftime("%Y%m%d_%H%M%S")}.log'))

    logging.info("加载模型...")
    model = load_model(args.model_path, args, device)

    logging.info("创建数据加载器...")
    dataloader, _ = create_dataloader(
        data_dir=args.data_dir,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.num_workers,
        distributed=False,
        normalize=args.normalize,
        train=False
    )

    logging.info(f"数据样本数: {len(dataloader.dataset)}")

    # Inference loop over at most args.num_samples samples.
    logging.info("开始推理...")
    all_results = []
    total_inference_time = 0.0
    total_mse_error = 0.0
    total_mae_error = 0.0

    for i, sample in enumerate(dataloader):
        if i >= args.num_samples:
            break

        result = inference_single_sample(model, sample, device)
        all_results.append(result)

        # Accumulate running totals for the summary.
        total_inference_time += result['inference_time']
        total_mse_error += result['mse_error']
        total_mae_error += result['mae_error']

        logging.info(
            f"样本 {i}: 推理时间={result['inference_time']:.4f}s, "
            f"MSE={result['mse_error']:.6f}, MAE={result['mae_error']:.6f}"
        )

        # Optionally persist raw predictions per sample.
        if args.save_predictions:
            sample_dir = os.path.join(args.output_dir, f'sample_{i}')
            os.makedirs(sample_dir, exist_ok=True)

            save_predictions(
                result['inputs'], result['targets'], result['outputs'],
                sample_dir, sample_idx=0
            )

        # Optionally render comparison and time-evolution figures.
        if args.visualize:
            comparison_path = os.path.join(args.output_dir, f'sample_{i}_comparison.png')
            visualize_comparison(result, comparison_path)

            evolution_path = os.path.join(args.output_dir, f'sample_{i}_evolution.png')
            visualize_time_evolution(result, evolution_path)

    # BUG FIX: guard against an empty dataset — the original divided by
    # num_samples and called torch.stack on empty lists, crashing here.
    num_samples = len(all_results)
    if num_samples == 0:
        logging.warning("未处理任何样本，跳过统计计算")
        return

    # Averages over the processed samples.
    avg_inference_time = total_inference_time / num_samples
    avg_mse_error = total_mse_error / num_samples
    avg_mae_error = total_mae_error / num_samples

    # Per-component statistics over all processed samples.
    all_predictions = torch.stack([r['outputs'] for r in all_results])
    all_targets = torch.stack([r['targets'] for r in all_results])
    field_stats = compute_field_statistics(all_predictions, all_targets)

    logging.info("推理完成!")
    logging.info(f"处理样本数: {num_samples}")
    logging.info(f"平均推理时间: {avg_inference_time:.4f}s")
    logging.info(f"平均MSE: {avg_mse_error:.6f}")
    logging.info(f"平均MAE: {avg_mae_error:.6f}")

    for comp, stats in field_stats.items():
        logging.info(
            f"{comp}: RMSE={stats['rmse']:.6f}, "
            f"平均相对误差={stats['mean_rel_error']:.6f}, "
            f"最大相对误差={stats['max_rel_error']:.6f}"
        )

    # Persist the summary; the file contains Chinese text, so force UTF-8
    # instead of relying on the platform's default encoding.
    stats_path = os.path.join(args.output_dir, 'inference_stats.txt')
    with open(stats_path, 'w', encoding='utf-8') as f:
        f.write(f"模型路径: {args.model_path}\n")
        f.write(f"数据目录: {args.data_dir}\n")
        f.write(f"处理样本数: {num_samples}\n")
        f.write(f"平均推理时间: {avg_inference_time:.4f}s\n")
        f.write(f"平均MSE: {avg_mse_error:.6f}\n")
        f.write(f"平均MAE: {avg_mae_error:.6f}\n\n")

        f.write("各分量统计信息:\n")
        for comp, stats in field_stats.items():
            f.write(
                f"{comp}: RMSE={stats['rmse']:.6f}, "
                f"平均相对误差={stats['mean_rel_error']:.6f}, "
                f"最大相对误差={stats['max_rel_error']:.6f}\n"
            )

    logging.info(f"统计信息已保存到: {stats_path}")


# Script entry point: run inference only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    main()