# src/dataset.py

import sys
import os
# Add the project root directory to the Python path
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import glob
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset
from sklearn.preprocessing import StandardScaler
import pickle
from src.config_loader import load_config
from src.augmentations import TimeSeriesAugmentation


class HRVDataset(Dataset):
    """
    HRV dataset with strict patient-level train/val/test splitting.

    Key design points:
    1. Records are split by patient ID so that every record of a given
       patient lands in exactly one split (prevents patient-level leakage).
    2. A class-level cache shares the loaded data and the fitted scaler
       across the train/val/test instances, avoiding repeated loading.
    3. Supports persisting/reusing the preprocessed data.
    """

    # Class-level cache shared by all instances.
    _all_samples = None      # list of per-record sample dicts
    _scaler = None           # StandardScaler fitted on the training split only
    _patient_splits = None   # {'train': [...], 'val': [...], 'test': [...]}

    def __init__(self, split='train', force_reload=False, cfg=None, enable_augmentation=True, use_multimodal=True):
        """
        Args:
            split: 'train', 'val', or 'test'
            force_reload: force the data to be reloaded (default False: reuse
                the class-level cache populated by a previous instance)
            cfg: configuration dict (optional; loaded automatically when None)
            enable_augmentation: enable data augmentation (training split only)
            use_multimodal: use multimodal inputs (time series + tabular data)
        """
        self.split = split
        self.is_train = (split == 'train')
        self.use_multimodal = use_multimodal

        # Load the configuration only when the caller did not supply one.
        if cfg is None:
            cfg = load_config()
        self.cfg = cfg

        # Build the augmenter only for the training split.
        self.enable_augmentation = enable_augmentation and self.is_train
        if self.enable_augmentation:
            aug_config = cfg.get('augmentation', None)
            self.augmentor = TimeSeriesAugmentation(aug_config)
            print(f"  数据增强已启用（split={split}）")
        else:
            self.augmentor = None

        if self.use_multimodal:
            print(f"  多模态输入已启用（split={split}）")

        self.window_size = cfg['preprocessing']['window_size']
        self.step_size = cfg['preprocessing']['step_size']
        self.features = cfg['data']['features']
        self.num_features = len(self.features)

        # (Re)load the data when the cache is empty or a reload was requested.
        if HRVDataset._all_samples is None or force_reload:
            self._load_and_prepare_data()

        # Select the records belonging to this split.
        self.samples = HRVDataset._patient_splits[split]
        self.scaler = HRVDataset._scaler

        print(f"\n{'='*60}")
        print(f"准备 {split.upper()} 数据集")
        print(f"{'='*60}")

        # Expand each record into fixed-size sliding-window samples.
        print(f"使用滑动窗口创建样本 (window_size={self.window_size}, step_size={self.step_size})...")
        self.windows = []

        for sample in self.samples:
            features = sample['features']
            age = sample['age']
            gender = sample['gender']
            userid = sample['userid']

            # Slide a window of window_size rows, advancing by step_size.
            for start in range(0, len(features) - self.window_size + 1, self.step_size):
                end = start + self.window_size
                window_features = features[start:end]

                self.windows.append({
                    'features': window_features,
                    'age': age,
                    'gender': gender,
                    'userid': userid
                })

        # Per-split summary statistics.
        unique_patients = len(set([s['userid'] for s in self.samples]))
        ages = [w['age'] for w in self.windows]
        genders = [w['gender'] for w in self.windows]

        print(f"\n{split.upper()} 集统计信息:")
        print(f"  患者数: {unique_patients}")
        print(f"  记录数: {len(self.samples)}")
        print(f"  窗口样本数: {len(self.windows)}")
        print(f"  年龄范围: {np.min(ages):.1f} - {np.max(ages):.1f}")
        print(f"  年龄均值: {np.mean(ages):.1f} ± {np.std(ages):.1f}")
        print(f"  性别分布: 男={np.sum(genders)} ({100*np.mean(genders):.1f}%), 女={len(genders)-np.sum(genders)} ({100*(1-np.mean(genders)):.1f}%)")

    def _load_and_prepare_data(self):
        """
        Load every record and split the dataset by patient (runs once per cache).

        Populates the class-level cache: ``_all_samples``, ``_scaler`` and
        ``_patient_splits``.

        Raises:
            ValueError: if no sample could be loaded or the training split is empty.
        """
        print("\n" + "="*60)
        print("加载和准备数据（按患者划分）")
        print("="*60)

        # Use the configuration resolved in __init__ so that a caller-supplied
        # cfg is respected instead of being re-loaded from disk.
        cfg = self.cfg

        # Load the label file.
        label_file = cfg['data']['label_file']
        print(f"\n1. 加载标签文件: {label_file}")
        label_df = pd.read_csv(label_file)
        print(f"   标签文件共有 {len(label_df)} 条记录")

        # Encode gender labels: male ('男') -> 1, female -> 0.
        label_df['gender_label'] = (label_df['Gender'] == '男').astype(int)

        # Discover all feature files.
        data_dir = cfg['data']['data_dir']
        print(f"\n2. 扫描特征目录: {data_dir}")
        feature_files = glob.glob(os.path.join(data_dir, "*_combined_features.csv"))
        print(f"   找到 {len(feature_files)} 个特征文件")

        # Load feature files and match them against the labels.
        print("\n3. 加载特征文件并匹配标签...")
        all_samples = []
        all_features_list = []
        matched_count = 0
        skipped_count = 0
        # Loop-invariant: minimum acceptable sequence length.
        min_seq_len = cfg['preprocessing']['min_sequence_length']

        for i, file_path in enumerate(feature_files):
            if (i + 1) % 500 == 0:
                print(f"   处理进度: {i+1}/{len(feature_files)}")

            # Extract userid and recordid from the file name
            # ("<userid>_<recordid>_combined_features.csv").
            filename = os.path.basename(file_path)
            parts = filename.replace('_combined_features.csv', '').split('_')
            if len(parts) < 2:
                skipped_count += 1
                continue

            userid = parts[0]
            recordid = parts[1]

            # Find the matching row in the label file.
            label_row = label_df[
                (label_df['userid'] == userid) &
                (label_df['recordid'] == int(recordid))
            ]

            if len(label_row) == 0:
                skipped_count += 1
                continue

            # Read the labels.
            age = label_row.iloc[0]['age']
            gender = label_row.iloc[0]['gender_label']

            # Skip records with missing labels.
            if pd.isna(age) or pd.isna(gender):
                skipped_count += 1
                continue

            # Load the feature data; skip unreadable files.
            try:
                df = pd.read_csv(file_path)
            except Exception:
                skipped_count += 1
                continue

            # Skip files that do not contain every required feature column.
            missing_features = [f for f in self.features if f not in df.columns]
            if missing_features:
                skipped_count += 1
                continue

            # Extract the feature matrix (same column set checked above).
            feature_data = df[self.features].values

            # Skip sequences that are too short.
            if len(feature_data) < min_seq_len:
                skipped_count += 1
                continue

            # Replace NaN/Inf values with zeros.
            if np.isnan(feature_data).any() or np.isinf(feature_data).any():
                feature_data = np.nan_to_num(feature_data, nan=0.0, posinf=0.0, neginf=0.0)

            # Store the raw sample; normalization happens once, further below.
            all_samples.append({
                'features': feature_data,
                'age': float(age),
                'gender': int(gender),
                'userid': userid,
                'recordid': recordid
            })

            # Collect data for fitting the scaler.
            all_features_list.append(feature_data)
            matched_count += 1

        print(f"\n   数据加载完成:")
        print(f"   - 成功匹配: {matched_count} 个样本")
        print(f"   - 跳过: {skipped_count} 个样本")

        if matched_count == 0:
            raise ValueError("没有成功加载任何样本！请检查数据路径和标签文件。")

        # Split the dataset by patient ID.
        print(f"\n4. 按患者ID划分数据集...")
        unique_patients = list(set([s['userid'] for s in all_samples]))
        print(f"   找到 {len(unique_patients)} 个唯一患者")

        # Shuffle patient IDs with a fixed seed for reproducibility.
        random_seed = cfg['split']['random_seed']
        np.random.seed(random_seed)
        np.random.shuffle(unique_patients)

        # Compute the split sizes.
        n_patients = len(unique_patients)
        test_ratio = cfg['split']['test_ratio']
        val_ratio = cfg['split']['validation_ratio']
        n_test = int(n_patients * test_ratio)
        n_val = int(n_patients * val_ratio)
        n_train = n_patients - n_test - n_val

        # Partition the patient IDs.
        train_patients = set(unique_patients[:n_train])
        val_patients = set(unique_patients[n_train:n_train+n_val])
        test_patients = set(unique_patients[n_train+n_val:])

        print(f"   患者划分: Train={len(train_patients)}, Val={len(val_patients)}, Test={len(test_patients)}")

        # Fit the scaler on training data only (avoids information leakage).
        print(f"\n5. 使用训练集数据拟合标准化器...")
        train_features_list = [s['features'] for s in all_samples if s['userid'] in train_patients]
        if len(train_features_list) == 0:
            raise ValueError("训练集为空！")

        train_features = np.vstack(train_features_list)
        scaler = StandardScaler()
        scaler.fit(train_features)
        print(f"   标准化器已拟合 (使用 {len(train_features_list)} 个训练样本)")

        # Normalize every sample with the training-set scaler.
        print(f"\n6. 标准化所有样本...")
        for sample in all_samples:
            sample['features'] = scaler.transform(sample['features'])

        # Assign samples to splits by patient ID.
        patient_splits = {
            'train': [s for s in all_samples if s['userid'] in train_patients],
            'val': [s for s in all_samples if s['userid'] in val_patients],
            'test': [s for s in all_samples if s['userid'] in test_patients]
        }

        # Verify the split (no patient may appear in two splits).
        print(f"\n7. 验证数据集划分...")
        train_ids = set([s['userid'] for s in patient_splits['train']])
        val_ids = set([s['userid'] for s in patient_splits['val']])
        test_ids = set([s['userid'] for s in patient_splits['test']])

        assert len(train_ids & val_ids) == 0, "训练集和验证集有患者重叠！"
        assert len(train_ids & test_ids) == 0, "训练集和测试集有患者重叠！"
        assert len(val_ids & test_ids) == 0, "验证集和测试集有患者重叠！"
        print(f"   ✓ 验证通过：训练/验证/测试集患者完全独立")

        print(f"\n   各数据集样本数:")
        print(f"   - Train: {len(patient_splits['train'])} 个记录 (来自 {len(train_ids)} 个患者)")
        print(f"   - Val: {len(patient_splits['val'])} 个记录 (来自 {len(val_ids)} 个患者)")
        print(f"   - Test: {len(patient_splits['test'])} 个记录 (来自 {len(test_ids)} 个患者)")

        # Publish the results into the class-level cache.
        HRVDataset._all_samples = all_samples
        HRVDataset._scaler = scaler
        HRVDataset._patient_splits = patient_splits

        print(f"\n{'='*60}")
        print("数据准备完成！")
        print(f"{'='*60}")

    def __len__(self):
        return len(self.windows)

    def __getitem__(self, idx):
        """
        Returns:
            When use_multimodal=False:
                features: (window_size, num_features)
                age: scalar
                gender: scalar (0 or 1)

            When use_multimodal=True:
                features: (window_size, num_features) - time-series data
                tabular_features: (tabular_dim,) - engineered + demographic features
                age: scalar
                gender: scalar (0 or 1)
        """
        window = self.windows[idx]

        # Copy so augmentation never mutates the cached window.
        features = window['features'].copy()  # (window_size, num_features)

        # Apply augmentation only in training mode.
        if self.enable_augmentation and self.augmentor is not None:
            features = self.augmentor.apply_random_augmentation(features)

        # Convert to tensors.
        features_tensor = torch.FloatTensor(features)     # (window_size, num_features)
        age = torch.FloatTensor([window['age']])          # (1,)
        gender = torch.LongTensor([window['gender']])     # (1,)

        # For multimodal input, derive engineered tabular features.
        if self.use_multimodal:
            # Per feature dimension: mean, std, max, min (computed on the
            # possibly-augmented window, so they stay consistent with it).
            tabular_features_list = []

            for i in range(features.shape[1]):  # iterate over feature dimensions
                feature_col = features[:, i]

                mean_val = np.mean(feature_col)
                std_val = np.std(feature_col)
                max_val = np.max(feature_col)
                min_val = np.min(feature_col)

                tabular_features_list.extend([mean_val, std_val, max_val, min_val])

            # Append the demographic feature (gender).
            gender_feature = float(window['gender'])
            tabular_features_list.append(gender_feature)

            tabular_features = torch.FloatTensor(tabular_features_list)

            return features_tensor, tabular_features, age, gender
        else:
            return features_tensor, age, gender


def get_dataloaders(cfg=None):
    """
    Create the train, validation and test data loaders.

    Args:
        cfg: configuration dict; loaded via load_config() when None.

    Returns:
        (train_loader, val_loader, test_loader) tuple of DataLoaders sharing
        batch size, worker count and pin_memory settings; only the training
        loader shuffles.
    """
    if cfg is None:
        cfg = load_config()

    print("="*60)
    print("创建数据加载器")
    print("="*60)

    # The first instantiation populates the shared cache; the others reuse it.
    train_dataset = HRVDataset(split='train', cfg=cfg)
    val_dataset = HRVDataset(split='val', cfg=cfg)
    test_dataset = HRVDataset(split='test', cfg=cfg)

    batch_size = cfg['training']['batch_size']
    num_workers = cfg['training']['num_workers']
    # Pin host memory only when batches will be transferred to a CUDA device.
    pin_memory = cfg['training']['device'] == 'cuda'

    def _make_loader(dataset, shuffle):
        # All three loaders share identical settings apart from shuffling.
        return torch.utils.data.DataLoader(
            dataset,
            batch_size=batch_size,
            shuffle=shuffle,
            num_workers=num_workers,
            pin_memory=pin_memory
        )

    train_loader = _make_loader(train_dataset, shuffle=True)
    val_loader = _make_loader(val_dataset, shuffle=False)
    test_loader = _make_loader(test_dataset, shuffle=False)

    print("\n数据加载器创建完成！")
    print("="*60)

    return train_loader, val_loader, test_loader


