import os
import numpy as np
import torch
from torch.utils.data import Dataset
import warnings

# NOTE(review): silences EVERY warning process-wide at import time — consider
# narrowing to specific categories or scoping with warnings.catch_warnings().
warnings.filterwarnings('ignore')

class SpatialCorrDataset(Dataset):
    """
    Spatial-correlation dataset loader.

    Serves windows of spatial-correlation values only — channel features are
    no longer used as input. Optimized with a raw-file cache plus a
    preprocessed-result cache so repeated construction does not re-read or
    re-process the data.
    """
    # Raw np.load results cached PER absolute file path, so datasets built
    # from different files never see each other's data (previously a single
    # unkeyed slot, which returned stale data for a second file).
    # NOTE: cached NpzFile objects keep their file handle open until cleared.
    _file_cache = {}
    # Fully preprocessed split arrays, keyed by (file path, flag, seq_len).
    _processed_cache = {}

    def __init__(self, root_path, flag='train', size=None,
                 features='M', data_path='spatial_correlation_data.npz'):
        """
        Initialize the spatial-correlation dataset.

        Args:
            root_path (str): dataset root directory.
            flag (str): which split to load: 'train'/'val'/'test'/'all'.
            size (list): [seq_len, label_len, pred_len] — input sequence
                length, label length and prediction length. Defaults to
                [20, 10, 10] when None.
            features (str): feature mode, default 'M' (multivariate).
            data_path (str): data file name inside root_path.
        """
        # Window-size configuration.
        if size is None:  # identity check, not `== None`
            self.seq_len = 20  # default: 20 time steps per window
            self.label_len = 10
            self.pred_len = 10
        else:
            self.seq_len = size[0]
            self.label_len = size[1]
            self.pred_len = size[2]

        # Split selection.
        assert flag in ['train', 'test', 'val', 'all']
        type_map = {'train': 0, 'val': 1, 'test': 2, 'all': 3}
        self.set_type = type_map[flag]
        self.flag = flag

        self.features = features
        self.root_path = root_path
        self.data_path = data_path
        self.feature_dim = 64  # 64 complex values per step, split into real/imag parts

        # The FULL file path participates in the cache key so identical file
        # names under different roots cannot collide (previously only the
        # bare data_path was used, which could return another root's data).
        self._data_file = os.path.join(self.root_path, self.data_path)
        self.cache_key = f"{self._data_file}_{flag}_{self.seq_len}"

        # Load (or reuse cached) data.
        self._load_data()

    def _load_data(self):
        """Load, shuffle, split, and preprocess the spatial-correlation data."""
        try:
            # Fast path: reuse a fully preprocessed split if available.
            cached = SpatialCorrDataset._processed_cache.get(self.cache_key)
            if cached is not None:
                print(f"使用预处理缓存数据: {self.flag}")
                for key, value in cached.items():
                    setattr(self, key, value)
                return

            data_file = self._data_file
            print(f"加载空间相关性数据: {data_file}")

            if not os.path.exists(data_file):
                raise FileNotFoundError(f"找不到文件: {data_file}")

            # Reuse the raw-file cache only for this exact file.
            data = SpatialCorrDataset._file_cache.get(data_file)
            if data is not None:
                print("使用缓存的空间相关性数据")
            else:
                data = np.load(data_file)
                SpatialCorrDataset._file_cache[data_file] = data

            # Validate required keys before touching the arrays.
            required_keys = ['spatial_corr_real', 'spatial_corr_imag', 'window_time_ids']
            for key in required_keys:
                if key not in data:
                    raise ValueError(f"数据文件缺少必要的键: {key}")

            spatial_corr_real = data['spatial_corr_real']  # [n_samples, window_size, 64]
            spatial_corr_imag = data['spatial_corr_imag']  # [n_samples, window_size, 64]
            time_ids = data['window_time_ids']             # [n_samples, window_size]

            print(f"空间相关性数据形状:")
            print(f"- 实部: {spatial_corr_real.shape}")
            print(f"- 虚部: {spatial_corr_imag.shape}")
            print(f"- 时间点ID: {time_ids.shape}")

            # Shuffle window order with a fixed-seed LOCAL generator: the
            # permutation is identical to np.random.seed(42)+shuffle, but the
            # global NumPy RNG state is left untouched (the old code silently
            # reseeded the whole process as a side effect of construction).
            num_samples = len(spatial_corr_real)
            window_indices = np.arange(num_samples)
            np.random.RandomState(42).shuffle(window_indices)

            spatial_corr_real = spatial_corr_real[window_indices]
            spatial_corr_imag = spatial_corr_imag[window_indices]
            time_ids = time_ids[window_indices]

            print(f"已打乱窗口顺序，确保训练集包含整个时间范围的数据")

            # Split boundaries: train 70%, val 10%, test 20%, 'all' = everything.
            border1s = [0, int(num_samples * 0.7), int(num_samples * 0.8), 0]
            border2s = [int(num_samples * 0.7), int(num_samples * 0.8), num_samples, num_samples]
            border1 = border1s[self.set_type]
            border2 = border2s[self.set_type]

            self.spatial_corr_real = spatial_corr_real[border1:border2]
            self.spatial_corr_imag = spatial_corr_imag[border1:border2]
            self.time_ids = time_ids[border1:border2]

            # Report time-id coverage of the training split.
            if self.set_type == 0:
                unique_time_ids = np.unique(self.time_ids.flatten())
                print(f"训练集包含的时间点ID数量: {len(unique_time_ids)}")
                print(f"训练集时间点ID范围: {unique_time_ids.min()} - {unique_time_ids.max()}")
                if len(unique_time_ids) > 10:
                    time_id_samples = unique_time_ids[::len(unique_time_ids) // 10]
                else:
                    time_id_samples = unique_time_ids
                print(f"训练集时间点ID分布样本: {time_id_samples}")

            print("预计算数据转换...")
            window_size = self.spatial_corr_real.shape[1]

            # Trim to seq_len BEFORE casting/concatenating so the expensive
            # copies only touch the data actually served.
            if self.seq_len <= window_size:
                self.spatial_corr_real = self.spatial_corr_real[:, :self.seq_len]
                self.spatial_corr_imag = self.spatial_corr_imag[:, :self.seq_len]
                self.time_ids = self.time_ids[:, :self.seq_len]

            # Cast unconditionally: the old code only cast inside the trim
            # branch, so a seq_len > window_size dataset served float64
            # real/imag tensors while 'data' was float32.
            self.spatial_corr_real = self.spatial_corr_real.astype(np.float32)
            self.spatial_corr_imag = self.spatial_corr_imag.astype(np.float32)

            # Precomputed real+imag concatenation: [n_samples, steps, 128].
            self.spatial_corr_combined = np.concatenate(
                [self.spatial_corr_real, self.spatial_corr_imag], axis=2)

            # Simple index-based time feature, one identical row per sample.
            steps = self.spatial_corr_real.shape[1]
            self.time_stamps = np.tile(np.arange(steps), (len(self.spatial_corr_real), 1))

            # Cache the preprocessed split for subsequent constructions.
            SpatialCorrDataset._processed_cache[self.cache_key] = {
                'spatial_corr_real': self.spatial_corr_real,
                'spatial_corr_imag': self.spatial_corr_imag,
                'spatial_corr_combined': self.spatial_corr_combined,
                'time_ids': self.time_ids,
                'time_stamps': self.time_stamps,
            }

            print(f"划分后的数据形状 ({self.flag}):")
            print(f"- 实部: {self.spatial_corr_real.shape}")
            print(f"- 虚部: {self.spatial_corr_imag.shape}")
            print(f"- 合并后: {self.spatial_corr_combined.shape}")
            print(f"- 时间点ID: {self.time_ids.shape}")

        except Exception as e:
            # Surface the failure but re-raise so callers see the real error.
            print(f"加载数据失败: {e}")
            raise

    def __getitem__(self, index):
        """
        Return one sample using the precomputed arrays (no per-call merging).

        Args:
            index (int): sample index.

        Returns:
            tuple: (x_dict, seq_x_mark) where x_dict holds the input tensors
            and seq_x_mark is the [seq_len, 1] float time-index feature.
        """
        seq_combined = self.spatial_corr_combined[index]  # [seq_len, 128]
        seq_real = self.spatial_corr_real[index]          # [seq_len, 64]
        seq_imag = self.spatial_corr_imag[index]          # [seq_len, 64]
        seq_time_ids = self.time_ids[index]               # [seq_len]
        seq_x_mark = self.time_stamps[index]              # [seq_len]

        # .copy() detaches each tensor from the shared cached array so that
        # downstream in-place ops cannot corrupt the class-level cache.
        x_dict = {
            'data': torch.from_numpy(seq_combined.copy()),               # [seq_len, 128]
            'spatial_corr_real': torch.from_numpy(seq_real.copy()),      # [seq_len, 64]
            'spatial_corr_imag': torch.from_numpy(seq_imag.copy()),      # [seq_len, 64]
            'time_ids': torch.from_numpy(seq_time_ids.copy()).long(),    # [seq_len]
            'is_spatial_corr_only': torch.tensor(True)  # marks pure spatial-correlation input
        }

        return x_dict, torch.from_numpy(seq_x_mark.copy()).float().unsqueeze(-1)

    def __len__(self):
        """Number of windows in this split."""
        return len(self.spatial_corr_real)

    @classmethod
    def clear_cache(cls):
        """Clear both the raw-file and the preprocessed caches."""
        cls._file_cache.clear()
        cls._processed_cache.clear()
        print("已清除数据集缓存")