# src/data_loader.py
# NOTE: this file is no longer used; the current implementation lives in src/dataset.py.
# It is kept here for reference only.

import torch
import pandas as pd
import numpy as np
from torch.utils.data import Dataset, DataLoader
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from src.config_loader import load_config

class TimeSeriesDataset(Dataset):
    """PyTorch Dataset wrapping pre-windowed time-series data.

    Each item is a ``(window, age, gender)`` triple of float32 tensors;
    ``window`` is expected to already be shaped ``(num_channels, window_size)``.
    """

    def __init__(self, windows, ages, genders):
        # Convert everything to float32 tensors once, up front, so that
        # __getitem__ is a cheap indexing operation.
        self.windows, self.ages, self.genders = (
            torch.tensor(arr, dtype=torch.float32)
            for arr in (windows, ages, genders)
        )

    def __len__(self):
        # One sample per pre-computed window.
        return self.windows.shape[0]

    def __getitem__(self, idx):
        # Window tensors are stored as (num_channels, window_size).
        return (self.windows[idx], self.ages[idx], self.genders[idx])

def create_sliding_windows(data, window_size, step_size):
    """Generate sliding windows over a time series.

    Args:
        data: NumPy array of shape ``(num_time_steps, num_features)``.
        window_size: Number of consecutive time steps per window.
        step_size: Stride between the start indices of successive windows.

    Returns:
        NumPy array of shape ``(num_windows, window_size, num_features)``.
        When ``data`` is shorter than ``window_size`` the result is an
        empty array with that same 3-D shape (the original returned a
        shapeless 1-D empty array, which broke downstream transposes).
    """
    windows = [
        data[start:start + window_size]
        for start in range(0, len(data) - window_size + 1, step_size)
    ]
    if not windows:
        # Preserve the (0, window_size, num_features) shape so callers
        # can transpose/concatenate without special-casing emptiness.
        return np.empty((0, window_size, data.shape[1]), dtype=data.dtype)
    return np.array(windows)

def get_data_loaders(cfg=None):
    """Load the raw CSV, split by patient, scale, window, and build DataLoaders.

    NOTE: this function is deprecated; use the implementation in
    src/dataset.py instead.

    Args:
        cfg: Configuration dict. When ``None``, it is loaded via
            ``load_config()``.

    Returns:
        Tuple ``(train_loader, val_loader, test_loader)`` of PyTorch
        DataLoaders. Each batch yields ``(windows, ages, genders)`` where
        ``windows`` has shape ``(batch, num_channels, window_size)``.
    """
    if cfg is None:
        cfg = load_config()

    # 1. Load data
    df = pd.read_csv(cfg['data']['data_path'])

    # Split at the patient level (not the row level) so that no patient
    # appears in more than one split -- this prevents data leakage.
    patient_ids = df['patient_id'].unique()
    test_ratio = cfg['split']['test_ratio']
    val_ratio = cfg['split']['validation_ratio']
    train_val_ids, test_ids = train_test_split(patient_ids, test_size=test_ratio, random_state=42)
    # The validation ratio is rescaled because it applies only to the
    # remaining (1 - test_ratio) fraction of patients.
    train_ids, val_ids = train_test_split(train_val_ids, test_size=val_ratio / (1 - test_ratio), random_state=42)

    train_df = df[df['patient_id'].isin(train_ids)]
    val_df = df[df['patient_id'].isin(val_ids)]
    test_df = df[df['patient_id'].isin(test_ids)]

    # 2. Feature Scaling
    # Fit the scaler ONLY on the training data to prevent data leakage.
    features = cfg['data']['features']
    scaler = MinMaxScaler()
    train_features = train_df[features].values
    train_df_scaled = scaler.fit_transform(train_features)
    # Transform validation and test data with the same (train-fit) scaler.
    val_features = val_df[features].values
    val_df_scaled = scaler.transform(val_features)
    test_features = test_df[features].values
    test_df_scaled = scaler.transform(test_features)

    # 3. Create windowed datasets for each split
    window_size = cfg['preprocessing']['window_size']
    step_size = cfg['preprocessing']['step_size']
    age_col = 'age'
    gender_col = 'gender'

    def process_df_to_windows(subset_df, scaled_features):
        """Window each patient's scaled rows; return (windows, ages, genders) arrays."""
        windows, ages, genders = [], [], []

        for patient_id in subset_df['patient_id'].unique():
            # A boolean mask keeps the row correspondence between
            # subset_df and scaled_features exact even when a patient's
            # rows are NOT contiguous in the file. The previous
            # running-offset slice (scaled_features[offset:offset+n])
            # silently mixed features from different patients whenever
            # rows interleaved.
            mask = (subset_df['patient_id'] == patient_id).to_numpy()
            patient_features = scaled_features[mask]

            if len(patient_features) < window_size:
                continue  # Skip patients with insufficient data

            # create_sliding_windows yields (num_windows, window_size, num_channels);
            # the model expects (num_windows, num_channels, window_size).
            patient_windows = create_sliding_windows(patient_features, window_size, step_size)
            patient_windows = np.transpose(patient_windows, (0, 2, 1))

            # Age/gender are constant per patient, so take the first row.
            patient_rows = subset_df[mask]
            age_label = patient_rows[age_col].iloc[0]
            gender_label = patient_rows[gender_col].iloc[0]

            num_windows_created = len(patient_windows)
            windows.extend(patient_windows)
            ages.extend([age_label] * num_windows_created)
            genders.extend([gender_label] * num_windows_created)

        return np.array(windows), np.array(ages), np.array(genders)

    train_windows, train_ages, train_genders = process_df_to_windows(train_df, train_df_scaled)
    val_windows, val_ages, val_genders = process_df_to_windows(val_df, val_df_scaled)
    test_windows, test_ages, test_genders = process_df_to_windows(test_df, test_df_scaled)

    # 4. Create Datasets and DataLoaders
    batch_size = cfg['training']['batch_size']
    num_workers = cfg['training']['num_workers']

    train_dataset = TimeSeriesDataset(train_windows, train_ages, train_genders)
    val_dataset = TimeSeriesDataset(val_windows, val_ages, val_genders)
    test_dataset = TimeSeriesDataset(test_windows, test_ages, test_genders)

    # Only the training loader is shuffled; evaluation order is deterministic.
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
    val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)

    print(f"Train loader: {len(train_loader.dataset)} samples")
    print(f"Validation loader: {len(val_loader.dataset)} samples")
    print(f"Test loader: {len(test_loader.dataset)} samples")

    return train_loader, val_loader, test_loader


