import os
import numpy as np
import pandas as pd
from scipy.stats.mstats import winsorize
from sklearn.model_selection import train_test_split
import torch
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
from sklearn.preprocessing import StandardScaler

DATA_PATH = "../PROCESSED_ECSMP/balanced_data"
RANDOM_SEED = 42
#WINDOW_SIZE = 5
WINDOW_SIZE = 10
BATCH_SIZE = 32
STRIDE = WINDOW_SIZE
MAX_SAMPLES_PER_CLASS = 15000
#MAX_SAMPLES_PER_CLASS = 25000


def create_windows(df, window_size=WINDOW_SIZE, stride=STRIDE):
    """Slice a single-user DataFrame into fixed-length feature windows.

    Args:
        df: DataFrame with a 'user_id' column plus feature columns; all
            rows are assumed to belong to the same user (the label is
            taken from the first row).
        window_size: number of consecutive rows per window.
        stride: step between successive window start positions.

    Returns:
        Tuple ``(windows, labels)`` where each window is a 2-D numpy
        array of the feature columns and each label is the user's id.
        Both lists are empty when df has fewer than window_size rows.
    """
    if len(df) < window_size:
        return [], []

    # One label per window: the (single) user this DataFrame belongs to.
    label = df['user_id'].iat[0]

    # Drop the label column once instead of per-window.
    features = df.drop('user_id', axis=1)

    starts = range(0, len(df) - window_size + 1, stride)
    windows = [features.iloc[start:start + window_size].values for start in starts]
    labels = [label] * len(windows)

    return windows, labels


def prepare_splits(data_path=DATA_PATH, random_seed=RANDOM_SEED,
                   max_samples_per_class=MAX_SAMPLES_PER_CLASS,
                   min_rows=5000,
                   mapping_path='../MERGED_ECSMP_BVP/user_id_mapping.csv'):
    """Build train/test DataLoaders of windowed per-user sensor data.

    Reads every ``*.csv`` under ``data_path`` (one user per file), caps and
    filters each user's rows, winsorizes the BVP signal, splits per user into
    train/test, windows each split, standardizes features with a scaler fit
    on the training data only, and wraps the result in DataLoaders.

    Args:
        data_path: directory containing one CSV per user.
        random_seed: seed for sampling and the train/test split.
        max_samples_per_class: cap on rows kept per user.
        min_rows: minimum rows a user must have (after capping) to be kept;
            defaults to the previous hard-coded 5000.
        mapping_path: where to write the user-id -> class-index CSV;
            defaults to the previous hard-coded location.

    Returns:
        Tuple ``(train_loader, test_loader, num_classes)``.

    Raises:
        ValueError: if no usable windows could be built from ``data_path``.
    """
    all_files = [os.path.join(data_path, file)
                 for file in os.listdir(data_path) if file.endswith('.csv')]

    train_windows = []
    train_labels = []
    test_windows = []
    test_labels = []

    # Track processed user IDs to count classes.
    processed_users = set()

    for file in tqdm(all_files, desc="Processing files"):
        try:
            print(f"Processing {file}")
            df = pd.read_csv(file)

            # Each file must carry a user_id and at least one full window.
            if 'user_id' not in df.columns or len(df) < WINDOW_SIZE:
                continue

            user_id = df['user_id'].iloc[0]

            # Down-sample oversized users to limit class imbalance.
            # NOTE(review): sample() returns rows in random order, so the
            # windows built below are not temporally contiguous — confirm
            # this is intended for this time-series data.
            if len(df) > max_samples_per_class:
                df = df.sample(n=max_samples_per_class, random_state=random_seed)

            # Skip users with too little data to form a useful class.
            if len(df) < min_rows:
                continue

            # Clip BVP outliers at the 5th/95th percentiles.
            if 'BVP' in df.columns:
                df['BVP'] = winsorize(df['BVP'], limits=[0.05, 0.05])

            # Per-user split so every class appears in both loaders.
            train_df, test_df = train_test_split(df, test_size=0.2,
                                                random_state=random_seed)

            user_train_windows, user_train_labels = create_windows(train_df)
            train_windows.extend(user_train_windows)
            train_labels.extend(user_train_labels)

            user_test_windows, user_test_labels = create_windows(test_df)
            test_windows.extend(user_test_windows)
            test_labels.extend(user_test_labels)

            processed_users.add(user_id)

        except Exception as e:
            # Best-effort: one corrupt file should not abort the whole run.
            print(f"Error processing {file}: {e}")

    # Fail loudly instead of crashing later on an empty-array reshape.
    if not train_windows or not test_windows:
        raise ValueError(
            f"No usable data found under {data_path!r}; "
            f"check the path and the min_rows/window-size thresholds."
        )

    # Map raw user IDs to contiguous class indices and persist the mapping.
    unique_users = sorted(processed_users)
    user_to_idx = {user: idx for idx, user in enumerate(unique_users)}

    train_labels = [user_to_idx[label] for label in train_labels]
    test_labels = [user_to_idx[label] for label in test_labels]

    mapping_df = pd.DataFrame(list(user_to_idx.items()),
                              columns=['original_user_id', 'class_idx'])
    mapping_df.to_csv(mapping_path, index=False)

    # Stack windows: (num_windows, window_size, num_features).
    X_train = np.array(train_windows)
    y_train = np.array(train_labels)
    X_test = np.array(test_windows)
    y_test = np.array(test_labels)

    # Fit the scaler on training rows only, then apply to both splits,
    # to avoid test-set leakage.
    scaler = StandardScaler()
    X_train = scaler.fit_transform(
        X_train.reshape(-1, X_train.shape[-1])).reshape(X_train.shape)
    X_test = scaler.transform(
        X_test.reshape(-1, X_test.shape[-1])).reshape(X_test.shape)

    print(f"Number of classes: {len(processed_users)}")
    print(f"Training data: {X_train.shape}, Labels: {y_train.shape}")
    print(f"Test data: {X_test.shape}, Labels: {y_test.shape}")

    # (batch, window, features) -> (batch, features, window); presumably
    # for channels-first consumers such as Conv1d — confirm with the model.
    X_train_tensor = torch.tensor(X_train, dtype=torch.float32).transpose(1, 2)
    y_train_tensor = torch.tensor(y_train, dtype=torch.long)
    X_test_tensor = torch.tensor(X_test, dtype=torch.float32).transpose(1, 2)
    y_test_tensor = torch.tensor(y_test, dtype=torch.long)

    train_dataset = torch.utils.data.TensorDataset(X_train_tensor, y_train_tensor)
    test_dataset = torch.utils.data.TensorDataset(X_test_tensor, y_test_tensor)

    train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False)

    return train_loader, test_loader, len(processed_users)

