"""
数据预处理和工具函数
"""

import torch
import numpy as np
import h5py
import os
import glob
from typing import Tuple, Dict, List, Optional, Union


class DataNormalizer:
    """Normalizes tensors using min-max scaling or z-score standardization.

    Usage: call ``fit()`` on representative data first, then ``transform()``
    and ``inverse_transform()``. Fitted parameters can be persisted with
    ``save()`` / ``load()``.
    """

    # Epsilon keeps divisions finite for constant features. It is reused in
    # inverse_transform so that inverse_transform(transform(x)) == x exactly
    # (the original omitted it on the inverse path, breaking the round trip).
    _EPS = 1e-8

    def __init__(self, method: str = 'minmax'):
        """Initialize the normalizer.

        Args:
            method: normalization method, either 'minmax' or 'zscore'.
        """
        self.method = method
        self.min_val = None   # per-feature minimum (minmax)
        self.max_val = None   # per-feature maximum (minmax)
        self.mean = None      # per-feature mean (zscore)
        self.std = None       # per-feature std (zscore)

    def fit(self, data: torch.Tensor):
        """Compute normalization statistics by reducing over dim 0.

        Args:
            data: tensor shaped [N, ...]; everything after dim 0 is treated
                as feature dimensions.

        Raises:
            ValueError: if the configured method is unknown.
        """
        if self.method == 'minmax':
            self.min_val = torch.min(data, dim=0)[0]
            self.max_val = torch.max(data, dim=0)[0]
        elif self.method == 'zscore':
            self.mean = torch.mean(data, dim=0)
            self.std = torch.std(data, dim=0)
        else:
            raise ValueError(f"Unknown normalization method: {self.method}")

    def _require_fitted(self):
        """Raise if fit() has not been called (or the method is unknown)."""
        if self.method == 'minmax':
            if self.min_val is None or self.max_val is None:
                raise ValueError("Normalizer not fitted. Call fit() first.")
        elif self.method == 'zscore':
            if self.mean is None or self.std is None:
                raise ValueError("Normalizer not fitted. Call fit() first.")
        else:
            # The original silently returned None here; fail loudly instead.
            raise ValueError(f"Unknown normalization method: {self.method}")

    def transform(self, data: torch.Tensor) -> torch.Tensor:
        """Apply normalization.

        Args:
            data: tensor broadcastable against the fitted statistics.

        Returns:
            The normalized tensor.

        Raises:
            ValueError: if not fitted or the method is unknown.
        """
        self._require_fitted()
        if self.method == 'minmax':
            return (data - self.min_val) / (self.max_val - self.min_val + self._EPS)
        return (data - self.mean) / (self.std + self._EPS)

    def inverse_transform(self, data: torch.Tensor) -> torch.Tensor:
        """Undo normalization; exact inverse of transform().

        Args:
            data: normalized tensor.

        Returns:
            Tensor on the original scale.

        Raises:
            ValueError: if not fitted or the method is unknown.
        """
        self._require_fitted()
        if self.method == 'minmax':
            # Use the same epsilon-padded range as transform() so the round
            # trip is exact.
            return data * (self.max_val - self.min_val + self._EPS) + self.min_val
        return data * (self.std + self._EPS) + self.mean

    def save(self, path: str):
        """Persist the normalization parameters with torch.save.

        Args:
            path: destination file path.
        """
        torch.save({
            'method': self.method,
            'min_val': self.min_val,
            'max_val': self.max_val,
            'mean': self.mean,
            'std': self.std
        }, path)

    def load(self, path: str):
        """Load normalization parameters previously written by save().

        Args:
            path: source file path.
        """
        params = torch.load(path)
        self.method = params['method']
        self.min_val = params['min_val']
        self.max_val = params['max_val']
        self.mean = params['mean']
        self.std = params['std']


class DataProcessor:
    """Subsamples, optionally normalizes, and re-saves raw HDF5 field data.

    A first pass over all matching files fits the normalizer on the pooled
    data; a second pass writes each processed file plus the fitted
    normalizer parameters.
    """

    def __init__(
        self,
        spatial_resolution: Tuple[int, int, int] = (64, 64, 64),
        temporal_resolution: int = 50,
        normalize: bool = True,
        normalization_method: str = 'minmax'
    ):
        """Initialize the data processor.

        Args:
            spatial_resolution: spatial grid size (nx, ny, nz); only used to
                shape zero placeholders when a field is absent from a file.
            temporal_resolution: number of time steps nt (same use as above).
            normalize: whether to normalize the data.
            normalization_method: 'minmax' or 'zscore'.
        """
        self.spatial_resolution = spatial_resolution
        self.temporal_resolution = temporal_resolution
        self.normalize = normalize
        self.normalizer = DataNormalizer(method=normalization_method) if normalize else None

    def _read_fields(self, f, field_names):
        """Read the requested datasets that are present in the open HDF5 file."""
        return {name: f[name][:] for name in field_names if name in f}

    def _extract_components(self, fields, key, sub_t, sub_x, sub_y, sub_z):
        """Return the three subsampled components of ``fields[key]``.

        The raw dataset is assumed to be laid out as (t, x, y, z, component)
        — TODO confirm against the data writer. When `key` is absent, three
        zero arrays shaped (nt, nx, ny, nz) from the configured resolutions
        are returned instead.
        """
        if key in fields:
            sliced = fields[key][::sub_t, ::sub_x, ::sub_y, ::sub_z, :]
            return sliced[..., 0], sliced[..., 1], sliced[..., 2]
        shape = (self.temporal_resolution, *self.spatial_resolution)
        return np.zeros(shape), np.zeros(shape), np.zeros(shape)

    def process_raw_data(
        self,
        data_dir: str,
        output_dir: str,
        field_names: Union[List[str], Tuple[str, ...]] = ('velocity', 'magnetic_field'),
        output_pattern: str = 'output-????',
        sub_x: int = 1,
        sub_y: int = 1,
        sub_z: int = 1,
        sub_t: int = 1
    ):
        """Process raw HDF5 files into normalized, subsampled outputs.

        Args:
            data_dir: directory containing the raw HDF5 files.
            output_dir: directory for processed files (created if needed).
            field_names: dataset names to read from each file. The default
                is a tuple (not a list) to avoid the shared-mutable-default
                pitfall.
            output_pattern: glob pattern matching the raw file names.
            sub_x, sub_y, sub_z: spatial subsampling strides.
            sub_t: temporal subsampling stride.
        """
        os.makedirs(output_dir, exist_ok=True)

        file_list = sorted(glob.glob(os.path.join(data_dir, output_pattern)))

        # Pass 1: collect all subsampled data to fit the normalizer.
        all_data = []
        for file_path in file_list:
            with h5py.File(file_path, 'r') as f:
                fields = self._read_fields(f, field_names)
            u, v, w = self._extract_components(fields, 'velocity', sub_t, sub_x, sub_y, sub_z)
            Bx, By, Bz = self._extract_components(fields, 'magnetic_field', sub_t, sub_x, sub_y, sub_z)
            stacked = np.stack([u, v, w, Bx, By, Bz], axis=-1)
            all_data.append(torch.from_numpy(stacked).float())

        # torch.cat raises on an empty list, so skip fitting when no files
        # matched the pattern (the original crashed here).
        if self.normalize and self.normalizer is not None and all_data:
            self.normalizer.fit(torch.cat(all_data, dim=0))

        # Pass 2: normalize each file and write the processed output.
        for i, file_path in enumerate(file_list):
            with h5py.File(file_path, 'r') as f:
                # Subsampled coordinate axes.
                t = f['t'][::sub_t]
                x = f['x'][::sub_x]
                y = f['y'][::sub_y]
                z = f['z'][::sub_z]
                fields = self._read_fields(f, field_names)

            u, v, w = self._extract_components(fields, 'velocity', sub_t, sub_x, sub_y, sub_z)
            Bx, By, Bz = self._extract_components(fields, 'magnetic_field', sub_t, sub_x, sub_y, sub_z)

            if self.normalize and self.normalizer is not None:
                data = torch.from_numpy(
                    np.stack([u, v, w, Bx, By, Bz], axis=-1)
                ).float()
                normalized = self.normalizer.transform(data).numpy()
                u, v, w = normalized[..., 0], normalized[..., 1], normalized[..., 2]
                Bx, By, Bz = normalized[..., 3], normalized[..., 4], normalized[..., 5]

            output_path = os.path.join(output_dir, f'output-{i:04d}.h5')
            with h5py.File(output_path, 'w') as f_out:
                f_out.create_dataset('t', data=t)
                f_out.create_dataset('x', data=x)
                f_out.create_dataset('y', data=y)
                f_out.create_dataset('z', data=z)
                f_out.create_dataset('velocity', data=np.stack([u, v, w], axis=-1))
                f_out.create_dataset('magnetic_field', data=np.stack([Bx, By, Bz], axis=-1))

        # Persist the fitted normalizer alongside the processed data.
        if self.normalize and self.normalizer is not None:
            self.normalizer.save(os.path.join(output_dir, 'normalizer.pth'))


def create_dataloader(
    data_dir: str,
    batch_size: int = 1,
    shuffle: bool = True,
    num_workers: int = 0,
    pin_memory: bool = False,
    distributed: bool = False,
    field_names: Union[List[str], Tuple[str, ...]] = ('velocity', 'magnetic_field'),
    output_names: str = 'output-????',
    sub_x: int = 1,
    sub_y: int = 1,
    sub_z: int = 1,
    sub_t: int = 1,
    ind_x: Optional[int] = None,
    ind_y: Optional[int] = None,
    ind_z: Optional[int] = None,
    ind_t: Optional[int] = None,
    normalize: bool = True,
    train: bool = True
) -> Tuple[torch.utils.data.DataLoader, Optional[torch.utils.data.Sampler]]:
    """Create a data loader for the EM field dataset.

    Args:
        data_dir: data directory.
        batch_size: batch size.
        shuffle: whether to shuffle the data.
        num_workers: number of worker processes.
        pin_memory: whether to pin host memory.
        distributed: whether distributed training is in use.
        field_names: names of the fields to load. The default is a tuple
            (not a list) to avoid the shared-mutable-default pitfall.
        output_names: glob pattern for output file names.
        sub_x, sub_y, sub_z: spatial subsampling strides.
        sub_t: temporal subsampling stride.
        ind_x, ind_y, ind_z: upper spatial index bounds (None = full extent).
        ind_t: upper temporal index bound (None = full extent).
        normalize: whether to normalize the data.
        train: whether this is the training split.

    Returns:
        Whatever ``EMFieldDataLoader.get_dataloader()`` returns — presumably
        a (DataLoader, Sampler) pair per the annotation; verify against
        em_dataset.
    """
    # Imported lazily to avoid a circular import at module load time.
    from .em_dataset import EMFieldDataset, EMFieldDataLoader

    dataset = EMFieldDataset(
        data_dir=data_dir,
        field_names=field_names,
        output_names=output_names,
        sub_x=sub_x,
        sub_y=sub_y,
        sub_z=sub_z,
        sub_t=sub_t,
        ind_x=ind_x,
        ind_y=ind_y,
        ind_z=ind_z,
        ind_t=ind_t,
        normalize=normalize,
        train=train
    )

    loader = EMFieldDataLoader(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=num_workers,
        pin_memory=pin_memory,
        distributed=distributed
    )

    return loader.get_dataloader()


def compute_statistics(
    data_dir: str,
    field_names: Union[List[str], Tuple[str, ...]] = ('velocity', 'magnetic_field'),
    output_names: str = 'output-????',
    sub_x: int = 1,
    sub_y: int = 1,
    sub_z: int = 1,
    sub_t: int = 1,
    ind_x: Optional[int] = None,
    ind_y: Optional[int] = None,
    ind_z: Optional[int] = None,
    ind_t: Optional[int] = None
) -> Dict[str, torch.Tensor]:
    """Compute per-feature statistics over the raw (un-normalized) dataset.

    Args:
        data_dir: data directory.
        field_names: names of the fields to load. The default is a tuple
            (not a list) to avoid the shared-mutable-default pitfall.
        output_names: glob pattern for output file names.
        sub_x, sub_y, sub_z: spatial subsampling strides.
        sub_t: temporal subsampling stride.
        ind_x, ind_y, ind_z: upper spatial index bounds (None = full extent).
        ind_t: upper temporal index bound (None = full extent).

    Returns:
        Dict with mean/std/min/max tensors for both inputs and outputs,
        each reduced over dim 0 of the concatenated samples.

    Raises:
        ValueError: if the dataset yields no samples.
    """
    # Imported lazily to avoid a circular import at module load time.
    from .em_dataset import EMFieldDataset

    # Statistics must describe the raw data, hence normalize=False.
    dataset = EMFieldDataset(
        data_dir=data_dir,
        field_names=field_names,
        output_names=output_names,
        sub_x=sub_x,
        sub_y=sub_y,
        sub_z=sub_z,
        sub_t=sub_t,
        ind_x=ind_x,
        ind_y=ind_y,
        ind_z=ind_z,
        ind_t=ind_t,
        normalize=False,
        train=True
    )

    input_chunks = []
    output_chunks = []
    for inputs, outputs in dataset:
        input_chunks.append(inputs)
        output_chunks.append(outputs)

    # torch.cat raises an opaque error on an empty list; fail clearly instead.
    if not input_chunks:
        raise ValueError(f"No samples found in {data_dir!r}")

    all_inputs = torch.cat(input_chunks, dim=0)
    all_outputs = torch.cat(output_chunks, dim=0)

    return {
        'inputs_mean': torch.mean(all_inputs, dim=0),
        'inputs_std': torch.std(all_inputs, dim=0),
        'inputs_min': torch.min(all_inputs, dim=0)[0],
        'inputs_max': torch.max(all_inputs, dim=0)[0],
        'outputs_mean': torch.mean(all_outputs, dim=0),
        'outputs_std': torch.std(all_outputs, dim=0),
        'outputs_min': torch.min(all_outputs, dim=0)[0],
        'outputs_max': torch.max(all_outputs, dim=0)[0],
    }


def visualize_field(
    field: torch.Tensor,
    t_idx: int = 0,
    z_idx: Optional[int] = None,
    save_path: Optional[str] = None,
    title: Optional[str] = None
):
    """Plot a 2x2 grid of component slices of a field tensor.

    Args:
        field: field data shaped [nt, nx, ny, nz, components].
        t_idx: time index of the slice to plot.
        z_idx: z index of the slice; defaults to the middle slice when None.
            (Annotation fixed to Optional[int] — the original declared a
            plain int with a None default.)
        save_path: if given, the figure is also written to this path.
        title: figure title; a default one is generated when None.
    """
    # Imported lazily so the module does not require matplotlib unless
    # plotting is actually requested.
    import matplotlib.pyplot as plt

    nt, nx, ny, nz, n_comp = field.shape

    # Default to the middle z slice.
    if z_idx is None:
        z_idx = nz // 2

    fig, axes = plt.subplots(2, 2, figsize=(12, 10))

    # Plot up to four components; labels presume the channel order
    # (u, v, Bx, By) — verify against the data pipeline.
    components = ['u', 'v', 'Bx', 'By']
    for i, ax in enumerate(axes.flat):
        if i < n_comp:
            im = ax.imshow(
                field[t_idx, :, :, z_idx, i].cpu().numpy(),
                origin='lower',
                cmap='viridis'
            )
            ax.set_title(f'{components[i]} (t={t_idx}, z={z_idx})')
            plt.colorbar(im, ax=ax)

    fig.suptitle(title if title else f'Field Visualization (t={t_idx}, z={z_idx})')

    plt.tight_layout()

    if save_path:
        plt.savefig(save_path, dpi=300, bbox_inches='tight')

    plt.show()


def save_predictions(
    inputs: torch.Tensor,
    outputs: torch.Tensor,
    predictions: torch.Tensor,
    save_dir: str,
    sample_idx: int = 0
):
    """Save one sample's predictions to HDF5 and plot true/pred/error maps.

    Args:
        inputs: input batch; the last-dim channels are assumed to be the
            (t, x, y, z) coordinates — verify against the dataset.
        outputs: ground-truth batch, assumed shaped [B, nt, nx, ny, nz, 4]
            with channels (u, v, Bx, By) — TODO confirm.
        predictions: predicted batch with the same shape as `outputs`.
        save_dir: output directory (created if needed).
        sample_idx: which sample of the batch to save.
    """
    # BUG FIX: the original used `plt` without importing matplotlib anywhere
    # in scope, raising NameError at the plotting step.
    import matplotlib.pyplot as plt

    os.makedirs(save_dir, exist_ok=True)

    inputs_np = inputs[sample_idx].cpu().numpy()
    outputs_np = outputs[sample_idx].cpu().numpy()
    predictions_np = predictions[sample_idx].cpu().numpy()

    save_path = os.path.join(save_dir, f'predictions_{sample_idx:04d}.h5')
    with h5py.File(save_path, 'w') as f:
        # Coordinate channels.
        f.create_dataset('t', data=inputs_np[..., 0])
        f.create_dataset('x', data=inputs_np[..., 1])
        f.create_dataset('y', data=inputs_np[..., 2])
        f.create_dataset('z', data=inputs_np[..., 3])

        # True and predicted field components.
        f.create_dataset('true_u', data=outputs_np[..., 0])
        f.create_dataset('true_v', data=outputs_np[..., 1])
        f.create_dataset('true_Bx', data=outputs_np[..., 2])
        f.create_dataset('true_By', data=outputs_np[..., 3])

        f.create_dataset('pred_u', data=predictions_np[..., 0])
        f.create_dataset('pred_v', data=predictions_np[..., 1])
        f.create_dataset('pred_Bx', data=predictions_np[..., 2])
        f.create_dataset('pred_By', data=predictions_np[..., 3])

    # Visualize the middle time step and middle z slice. The z index is
    # applied to axis 3 below, so it must come from shape[3] (the original
    # used shape[2], which is only correct for cubic grids).
    t_idx = outputs_np.shape[0] // 2
    z_idx = outputs_np.shape[3] // 2

    fig, axes = plt.subplots(3, 4, figsize=(16, 12))

    components = ['u', 'v', 'Bx', 'By']
    for i, comp in enumerate(components):
        # Row 0: ground truth.
        im = axes[0, i].imshow(
            outputs_np[t_idx, :, :, z_idx, i],
            origin='lower',
            cmap='viridis'
        )
        axes[0, i].set_title(f'True {comp}')
        plt.colorbar(im, ax=axes[0, i])

        # Row 1: prediction.
        im = axes[1, i].imshow(
            predictions_np[t_idx, :, :, z_idx, i],
            origin='lower',
            cmap='viridis'
        )
        axes[1, i].set_title(f'Pred {comp}')
        plt.colorbar(im, ax=axes[1, i])

        # Row 2: absolute error.
        error = np.abs(outputs_np[t_idx, :, :, z_idx, i] - predictions_np[t_idx, :, :, z_idx, i])
        im = axes[2, i].imshow(error, origin='lower', cmap='hot')
        axes[2, i].set_title(f'Error {comp}')
        plt.colorbar(im, ax=axes[2, i])

    plt.tight_layout()

    viz_path = os.path.join(save_dir, f'predictions_{sample_idx:04d}.png')
    plt.savefig(viz_path, dpi=300, bbox_inches='tight')

    plt.show()