"""
数据归一化工具
用于计算数据集的全局统计量并进行统一归一化处理
"""

import json
from pathlib import Path
from tqdm import tqdm
import numpy as np
import torch
from typing import Dict, Tuple, Optional
import shutil
import scipy.io

from data.pa_us_dataset import PaUsDataset
from utils.sensors import preprocess_sensor_data


class DatasetNormalizer:
    """Compute per-channel global min/max statistics for a PA/US dataset and
    apply channel-wise min-max normalization to the [-1, 1] range.

    Typical workflow:
        normalizer = DatasetNormalizer(path)
        normalizer.calculate_global_stats(target_shape, save_stats=True)
        normalizer.save_normalized_dataset(output_path, target_shape)
    """

    def __init__(self, dataset_path: Path):
        """
        Initialize the dataset normalizer.

        Parameters:
        - dataset_path: dataset location (a directory of samples or a
          file-list text file, as accepted by PaUsDataset)
        """
        self.dataset_path = Path(dataset_path)
        # Populated by calculate_global_stats() or load_global_stats().
        self.global_stats: Optional[Dict] = None

    def calculate_global_stats(self, target_shape, save_stats: bool = False) -> Dict:
        """
        Compute global per-channel statistics over the whole dataset.

        Parameters:
        - target_shape: target shape (time_steps, channels); only the channel
          count (second element) is used here
        - save_stats: whether to also persist the statistics to JSON (default False)

        Returns:
        - dict with per-channel global min/max values plus metadata

        Raises:
        - ValueError: if the dataset is empty or contains no valid channel data
        """
        print("Calculating global statistics for sensor data...")

        dataset = PaUsDataset(self.dataset_path)
        if len(dataset) == 0:
            raise ValueError(f"No data found in dataset at {self.dataset_path}")

        # The second element of target_shape is the number of channels.
        num_channels = target_shape[1]
        channel_mins = np.full(num_channels, np.inf)
        channel_maxs = np.full(num_channels, -np.inf)

        for i in tqdm(range(len(dataset)), desc="Processing samples"):
            try:
                sample = dataset[i]
                data = sample[0]  # sensor data is the first tuple element

                if torch.is_tensor(data):
                    data = data.numpy()

                if data.ndim >= 2:
                    # Vectorized per-channel reduction over every axis except
                    # the channel axis (axis 1) — replaces a per-channel loop.
                    c = min(num_channels, data.shape[1])
                    reduce_axes = tuple(ax for ax in range(data.ndim) if ax != 1)
                    channel_mins[:c] = np.minimum(
                        channel_mins[:c], data[:, :c].min(axis=reduce_axes))
                    channel_maxs[:c] = np.maximum(
                        channel_maxs[:c], data[:, :c].max(axis=reduce_axes))

            except Exception as e:
                # Best-effort scan: report and skip unreadable samples.
                print(f"Warning: Failed to process sample {i}: {e}")
                continue

        # Channels never observed keep their +/-inf sentinels; require at
        # least one channel with real data.
        valid_channels = (channel_mins != np.inf) & (channel_maxs != -np.inf)
        if not np.any(valid_channels):
            raise ValueError("No valid channel data found")

        self.global_stats = {
            'channel_mins': channel_mins.tolist(),
            'channel_maxs': channel_maxs.tolist(),
            'num_channels': num_channels,
            # NOTE: a tuple here becomes a list after a JSON round-trip.
            'target_shape': target_shape,
            'num_samples': len(dataset)
        }

        print("Global statistics calculated:")
        print(f"  Target shape: {target_shape}")
        print(f"  Channels: {num_channels}")
        print(f"  Overall min: {np.min(channel_mins[valid_channels]):.6f}")
        print(f"  Overall max: {np.max(channel_maxs[valid_channels]):.6f}")
        print(f"  Samples processed: {len(dataset)}")

        if save_stats:
            self.save_global_stats()

        return self.global_stats

    def save_global_stats(self, filepath: Optional[Path] = None):
        """
        Save the global statistics to a JSON file.

        Parameters:
        - filepath: destination path (str or Path); defaults to
          <dataset parent>/global_stats.json

        Raises:
        - ValueError: if no statistics have been computed yet
        """
        if self.global_stats is None:
            raise ValueError("No global statistics calculated. Call calculate_global_stats() first.")

        # Coerce to Path so str arguments also work; default lives next to the dataset.
        filepath = Path(filepath) if filepath is not None else self.dataset_path.parent / "global_stats.json"

        with open(filepath, 'w') as f:
            json.dump(self.global_stats, f, indent=2)

        print(f"Global statistics saved to {filepath}")

    def load_global_stats(self, filepath: Optional[Path] = None) -> Dict:
        """
        Load global statistics from a JSON file.

        Parameters:
        - filepath: source path (str or Path); defaults to
          <dataset parent>/global_stats.json

        Returns:
        - the loaded global statistics dict

        Raises:
        - FileNotFoundError: if the statistics file does not exist
        """
        # Coerce to Path so str arguments also work with .exists().
        filepath = Path(filepath) if filepath is not None else self.dataset_path.parent / "global_stats.json"

        if not filepath.exists():
            raise FileNotFoundError(f"Global statistics file not found: {filepath}")

        with open(filepath, 'r') as f:
            self.global_stats = json.load(f)

        print(f"Global statistics loaded from {filepath}")
        print(f"  Channels: {self.global_stats['num_channels']}")
        print(f"  Overall min: {np.min(self.global_stats['channel_mins']):.6f}")
        print(f"  Overall max: {np.max(self.global_stats['channel_maxs']):.6f}")

        return self.global_stats

    def _scale_channels(self, data: np.ndarray, target_channels: int) -> np.ndarray:
        """Min-max scale each channel of `data` to [-1, 1] using the global stats.

        Shared by normalize_data() and _normalize_single_sample() (the loop
        used to be duplicated in both). Channels beyond the available
        statistics or beyond `target_channels` are left untouched, as are
        constant channels (max == min) to avoid division by zero.

        NOTE(review): `data` is assumed to be at least 2-D float with
        channels on axis 1 — a 1-D input would fail at `data[:, ch]`,
        matching the original behavior.
        """
        data_channels = data.shape[1] if data.ndim >= 2 else 1
        scaled = data.copy()
        n = min(data_channels, target_channels, len(self.global_stats['channel_mins']))
        for ch in range(n):
            ch_min = self.global_stats['channel_mins'][ch]
            ch_max = self.global_stats['channel_maxs'][ch]
            if ch_max > ch_min:  # skip degenerate channels (division by zero)
                scaled[:, ch] = 2 * (data[:, ch] - ch_min) / (ch_max - ch_min) - 1
        return scaled

    def normalize_data(self, sensor_data: np.ndarray, target_shape: Tuple[int, int] = (2560, 64)) -> torch.Tensor:
        """
        Normalize data to [-1, 1] using the channel-wise global statistics,
        then run the standard sensor preprocessing.

        Parameters:
        - sensor_data: raw sensor data array
        - target_shape: target shape (time_steps, channels)

        Returns:
        - preprocessed, normalized tensor

        Raises:
        - ValueError: if no global statistics are available
        """
        if self.global_stats is None:
            raise ValueError("No global statistics available. Call calculate_global_stats() or load_global_stats() first.")

        normalized_data = self._scale_channels(sensor_data, target_shape[1])

        # Channel 0's statistics are forwarded as a fallback range.
        # NOTE(review): the data is already scaled to [-1, 1] above, yet the
        # original (pre-normalization) channel-0 min/max is passed on here —
        # confirm preprocess_sensor_data does not rescale a second time.
        global_max = self.global_stats['channel_maxs'][0]
        global_min = self.global_stats['channel_mins'][0]

        return preprocess_sensor_data(
            normalized_data,
            global_max,
            global_min,
            target_shape
        )

    def save_normalized_dataset(self, output_path: Path, target_shape: Tuple[int, int] = (2560, 64)):
        """
        Write a normalized copy of the dataset to a new folder.

        For every sample the sensor data is normalized, written back into the
        original .mat structure (all other fields preserved), and the
        accompanying GT/DAS images and metadata files are copied alongside.

        Parameters:
        - output_path: output dataset directory
        - target_shape: target shape (time_steps, channels)

        Raises:
        - ValueError: if no global statistics are available
        """
        if self.global_stats is None:
            raise ValueError("No global statistics available. Call calculate_global_stats() first.")

        output_path = Path(output_path)
        output_path.mkdir(parents=True, exist_ok=True)

        print(f"Saving normalized dataset to {output_path}")

        # Load the original dataset, asking for each sample's file stem.
        dataset = PaUsDataset(self.dataset_path, return_filestem=True)

        # Persist the statistics next to the normalized data.
        stats_file = output_path / "global_stats.json"
        with open(stats_file, 'w') as f:
            json.dump(self.global_stats, f, indent=2)

        for i in tqdm(range(len(dataset)), desc="Saving normalized data"):
            try:
                # Sensor data plus the stem used to locate the source files
                # (GT and DAS images are unused here).
                sensor_data, _, _, stem = dataset[i]

                if torch.is_tensor(sensor_data):
                    numpy_data = sensor_data.numpy()
                else:
                    numpy_data = sensor_data

                normalized_data = self._normalize_single_sample(numpy_data, target_shape)

                # Locate the original .mat file for this sample.
                original_mat_file = None
                if self.dataset_path.is_file():
                    # Dataset given as a file list: find the matching entry.
                    with self.dataset_path.open('r', encoding='utf-8') as f:
                        for line in f:
                            line = line.strip()
                            if line and line.endswith(stem):
                                original_mat_file = Path(line + "_rf.mat")
                                break
                else:
                    # Dataset given as a directory: search it recursively.
                    original_files = list(self.dataset_path.rglob(f"{stem}_rf.mat"))
                    if original_files:
                        original_mat_file = original_files[0]

                if not original_mat_file or not original_mat_file.exists():
                    print(f"Warning: Original .mat file not found for {stem}")
                    continue

                # Read the complete original .mat structure.
                original_mat_data = scipy.io.loadmat(original_mat_file)

                # Identify the sensor-data key (typically 'rf_data') by
                # matching the array shape against the loaded sample.
                sensor_data_key = None
                for key in original_mat_data.keys():
                    if not key.startswith('__') and isinstance(original_mat_data[key], np.ndarray):
                        if original_mat_data[key].shape == numpy_data.shape:
                            sensor_data_key = key
                            break

                if sensor_data_key is None:
                    # No shape match found; fall back to the conventional name.
                    sensor_data_key = 'rf_data'

                # Replace only the sensor data; every other field is kept.
                original_mat_data[sensor_data_key] = normalized_data

                # Mirror the original file-name layout.
                output_file = output_path / f"{stem}_rf.mat"
                output_file.parent.mkdir(parents=True, exist_ok=True)

                scipy.io.savemat(output_file, original_mat_data)

                # Copy companion image files, when present.
                original_dir = original_mat_file.parent

                gt_img_file = original_dir / f"{stem}_gt.png"
                if gt_img_file.exists():
                    shutil.copy2(gt_img_file, output_path / f"{stem}_gt.png")

                das_img_file = original_dir / f"{stem}_das.png"
                if das_img_file.exists():
                    shutil.copy2(das_img_file, output_path / f"{stem}_das.png")

                # Copy any metadata files that mention this sample's stem.
                for meta_pattern in ['*.json', '*.txt', '*.xml']:
                    for meta_file in original_dir.glob(meta_pattern):
                        if stem in meta_file.name:
                            output_meta_file = output_path / meta_file.name
                            if not output_meta_file.exists():
                                shutil.copy2(meta_file, output_meta_file)

            except Exception as e:
                # Best-effort conversion: report and continue with next sample.
                print(f"Warning: Failed to process sample {i}: {e}")
                continue

        print(f"Normalized dataset saved to {output_path}")
        print(f"Total samples processed: {len(dataset)}")

        # If the input was a file list, emit a matching list for the output.
        if self.dataset_path.is_file():
            output_list_file = output_path / self.dataset_path.name
            with output_list_file.open('w', encoding='utf-8') as f:
                for i in range(len(dataset)):
                    _, _, _, stem = dataset[i]
                    f.write(f"{output_path / stem}\n")
            print(f"File list saved to {output_list_file}")

    def _normalize_single_sample(self, data: np.ndarray, target_shape: Tuple[int, int]) -> np.ndarray:
        """
        Normalize a single sample and resize it to the target shape.

        Parameters:
        - data: raw 2-D sample data (time_steps, channels)
        - target_shape: target shape (time_steps, channels)

        Returns:
        - normalized (and, if needed, resized) numpy array
        """
        normalized_data = self._scale_channels(data, target_shape[1])

        # Resize to the target shape when necessary.
        if normalized_data.shape != target_shape:
            try:
                from scipy import ndimage
                if normalized_data.ndim == 2:
                    normalized_data = ndimage.zoom(
                        normalized_data,
                        (target_shape[0] / normalized_data.shape[0],
                         target_shape[1] / normalized_data.shape[1]),
                        order=1  # bilinear interpolation
                    )
            except ImportError:
                # Stdlib-only fallback (previously this branch imported an
                # unused sklearn scaler, adding a dependency that could itself
                # fail). NOTE: np.resize tiles/truncates the flattened array
                # rather than interpolating — crude best-effort only.
                print("Warning: scipy not available, using simple interpolation")
                if normalized_data.ndim == 2:
                    normalized_data = np.resize(normalized_data, target_shape)

        return normalized_data

    def get_stats(self) -> Optional[Dict]:
        """
        Return the current global statistics.

        Returns:
        - the global statistics dict, or None if none are set
        """
        return self.global_stats


if __name__ == "__main__":
    # 示例用法
    input_dataset_path = Path(r'D:\Downloads\pa_dataset')
    output_dataset_path = Path(r'D:\Downloads\pa_dataset_normalized')
    target_shape = (2560, 64)
    
    # 创建归一化数据集
    print("Creating normalized dataset...")
    normalizer = DatasetNormalizer(input_dataset_path)
    
    # 计算全局统计量
    normalizer.calculate_global_stats(target_shape, save_stats=True)
    
    # 保存归一化数据集
    normalizer.save_normalized_dataset(output_dataset_path, target_shape)
    
    print("Normalized dataset creation complete!")
