import os
import random
import numpy as np
import pandas as pd
from scipy.stats.mstats import winsorize
from sklearn.preprocessing import StandardScaler, OneHotEncoder
import torch
from torch.utils.data import DataLoader, TensorDataset

# Parameters
# DATA_PATH selects which preprocessed dataset directory to load; the
# commented alternatives are other corpora used during experimentation.
#DATA_PATH = "../balanced_data"
#DATA_PATH = "../PROCESSED_ECSMP"
DATA_PATH = "../PROCESSED_EMOGNITION"
RANDOM_SEED = 42          # fixed seed for reproducible shuffles/splits
TIMESTEPS = 10            # sliding-window length (rows per sample)
BATCH_SIZE = 32
TRAIN_RATIO = 0.8         # train+val fraction of each user's rows
VAL_RATIO = 0.2           # fraction of the train+val portion used for validation
MAX_SAMPLES_PER_CLASS = 20000   # cap rows per user to limit class imbalance
#MIN_SAMPLES_PER_CLASS = 5000
MIN_SAMPLES_PER_CLASS = 10000   # users with fewer rows are dropped entirely
# Optional feature whitelist; None means "use every non-user_id column".
#SELECTED_COLUMNS = ['BVP','GSR_GSR']
SELECTED_COLUMNS = None

# Seed every RNG in play (python, numpy, torch) so runs are reproducible.
random.seed(RANDOM_SEED)
np.random.seed(RANDOM_SEED)
torch.manual_seed(RANDOM_SEED)

# --------------------------------
# 1. Load and Split Data Per User
# --------------------------------
def load_and_split_data(data_path=DATA_PATH):
    """Load every CSV under ``data_path`` and split each user's rows into
    train/val/test DataFrames.

    Per user: rows are shuffled deterministically, capped at
    MAX_SAMPLES_PER_CLASS, and users with fewer than MIN_SAMPLES_PER_CLASS
    rows are dropped. VAL_RATIO is taken out of the train portion, so the
    effective fractions are train/val/test = 0.64/0.16/0.20 for
    TRAIN_RATIO=0.8, VAL_RATIO=0.2.

    NOTE(review): rows are shuffled *before* splitting, so a later
    sliding-window step over these frames mixes non-contiguous rows —
    confirm this is intended (load_and_split_data3 windows first instead).

    Returns:
        (train_df, val_df, test_df): DataFrames, each with a 'user_id' column.

    Raises:
        ValueError: if no CSV yields a user meeting the size criteria
            (previously surfaced as an opaque ``pd.concat`` error).
    """
    train_parts, val_parts, test_parts = [], [], []

    all_files = [os.path.join(data_path, f) for f in os.listdir(data_path) if f.endswith(".csv")]

    for file in all_files:
        df = pd.read_csv(file)
        if 'user_id' not in df.columns:
            continue

        if SELECTED_COLUMNS:
            df = df[['user_id'] + SELECTED_COLUMNS]

        # Clip the extreme 5% tails of every signal column to tame outliers.
        for col in df.columns:
            if col != 'user_id':
                df[col] = winsorize(df[col], limits=[0.05, 0.05])

        for user_id, user_data in df.groupby('user_id'):
            # Skip users with too little data to split meaningfully.
            if len(user_data) < MIN_SAMPLES_PER_CLASS:
                continue

            # Deterministic shuffle so the split is reproducible.
            user_data = user_data.sample(frac=1.0, random_state=RANDOM_SEED)

            if len(user_data) > MAX_SAMPLES_PER_CLASS:
                user_data = user_data.iloc[:MAX_SAMPLES_PER_CLASS]

            split1 = int(TRAIN_RATIO * len(user_data))
            train_val = user_data.iloc[:split1]
            test = user_data.iloc[split1:]

            split2 = int(VAL_RATIO * len(train_val))
            val = train_val.iloc[:split2]
            train = train_val.iloc[split2:]

            train_parts.append(train)
            val_parts.append(val)
            test_parts.append(test)

    if not train_parts:
        # pd.concat([]) raises an unhelpful error; fail with context instead.
        raise ValueError(
            f"No usable data found in '{data_path}': no user had at least "
            f"{MIN_SAMPLES_PER_CLASS} rows in any CSV with a 'user_id' column."
        )

    train_df = pd.concat(train_parts, ignore_index=True)
    val_df = pd.concat(val_parts, ignore_index=True)
    test_df = pd.concat(test_parts, ignore_index=True)

    return train_df, val_df, test_df

# --------------------------------
# 2. Normalize Features + One-Hot Encode Labels
# --------------------------------
def normalize_and_encode(train_df, val_df, test_df):
    """Standardize features and one-hot encode the 'user_id' labels.

    The scaler and encoder are fit on the training split only, then applied
    to the validation and test splits to avoid information leakage.

    Returns:
        (X_train, y_train, X_val, y_val, X_test, y_test, encoder) where the
        X arrays are standardized feature matrices, the y arrays are one-hot
        label matrices, and ``encoder`` is the fitted OneHotEncoder.
    """
    def _features_and_labels(frame):
        # Separate a split into its feature matrix and raw label vector.
        return frame.drop(columns=["user_id"]).values, frame["user_id"].values

    X_train, y_train = _features_and_labels(train_df)
    X_val, y_val = _features_and_labels(val_df)
    X_test, y_test = _features_and_labels(test_df)

    # Fit scaling statistics on train only, then apply everywhere.
    scaler = StandardScaler().fit(X_train)
    X_train, X_val, X_test = (
        scaler.transform(mat) for mat in (X_train, X_val, X_test)
    )

    # Dense one-hot labels; column order fixed by the training classes.
    encoder = OneHotEncoder(sparse_output=False)
    encoder.fit(y_train.reshape(-1, 1))
    y_train, y_val, y_test = (
        encoder.transform(vec.reshape(-1, 1)) for vec in (y_train, y_val, y_test)
    )

    return X_train, y_train, X_val, y_val, X_test, y_test, encoder

# --------------------------------
# 3. Create Sliding Windows
# --------------------------------
def create_sliding_windows(X, y, timesteps):
    """Build overlapping windows of length ``timesteps`` over row-major data.

    Each window ``X[i:i+timesteps]`` is paired with the label at the
    window's final row (predict from trailing context).

    Args:
        X: array-like of shape (n_samples, ...) — per-row features.
        y: array-like of shape (n_samples, ...) — per-row labels.
        timesteps: window length, >= 1.

    Returns:
        (X_windows, y_labels) of shapes (n_windows, timesteps, ...) and
        (n_windows, ...). When ``len(X) < timesteps`` this returns
        correctly-shaped *empty* arrays instead of the ambiguous shape-(0,)
        arrays the previous version produced, so downstream tensor
        construction keeps working.
    """
    X = np.asarray(X)
    y = np.asarray(y)
    n_windows = len(X) - timesteps + 1
    if n_windows <= 0:
        # Too few rows for even one window: preserve trailing dimensions.
        return (np.empty((0, timesteps) + X.shape[1:], dtype=X.dtype),
                np.empty((0,) + y.shape[1:], dtype=y.dtype))
    X_windows = np.stack([X[i:i + timesteps] for i in range(n_windows)])
    y_labels = np.asarray([y[i + timesteps - 1] for i in range(n_windows)])
    return X_windows, y_labels

# --------------------------------
# 4. Final Preparation: Get DataLoaders
# --------------------------------
def prepare_dataloaders(batch_size=BATCH_SIZE, timesteps=TIMESTEPS):
    """Build train/val/test DataLoaders of sliding-window float32 tensors.

    Loads and splits the raw data, standardizes features and one-hot
    encodes labels (fit on train only), then windows each split.

    Returns:
        (train_loader, val_loader, test_loader, num_classes)
    """
    train_df, val_df, test_df = load_and_split_data(DATA_PATH)
    X_train, y_train, X_val, y_val, X_test, y_test, _encoder = normalize_and_encode(
        train_df, val_df, test_df
    )

    def _windowed_dataset(features, labels):
        # Slice into overlapping windows and wrap as float32 tensors.
        win_X, win_y = create_sliding_windows(features, labels, timesteps)
        return TensorDataset(
            torch.tensor(win_X, dtype=torch.float32),
            torch.tensor(win_y, dtype=torch.float32),
        )

    # drop_last keeps batch shapes fixed for train/val; test keeps everything.
    train_loader = DataLoader(_windowed_dataset(X_train, y_train),
                              batch_size=batch_size, shuffle=True, drop_last=True)
    val_loader = DataLoader(_windowed_dataset(X_val, y_val),
                            batch_size=batch_size, shuffle=False, drop_last=True)
    test_loader = DataLoader(_windowed_dataset(X_test, y_test),
                             batch_size=batch_size, shuffle=False)

    num_classes = y_train.shape[1]
    return train_loader, val_loader, test_loader, num_classes


def load_and_split_data3(data_path=DATA_PATH):
    """Load per-user CSVs, window the signals, then split the *windows*.

    Unlike ``load_and_split_data``, sliding windows of length TIMESTEPS are
    built over each user's rows in their original order *before* any
    splitting; the windows themselves are then shuffled and partitioned per
    user. With TRAIN_RATIO=0.8 and VAL_RATIO=0.2 the fractions come out to
    train/val/test = 0.6/0.2/0.2 of each user's windows.

    NOTE(review): consecutive windows overlap by TIMESTEPS-1 rows, so
    shuffling windows before splitting places near-duplicate data in both
    train and test — confirm this leakage is acceptable.

    Returns:
        (train_X, train_y, val_X, val_y, test_X, test_y) where each X has
        shape (n_windows, TIMESTEPS, n_features) and each y is a 1-D array
        of user ids.
    """
    windows_by_user = {}

    all_files = [os.path.join(data_path, f) for f in os.listdir(data_path) if f.endswith(".csv")]

    for file in all_files:
        df = pd.read_csv(file)
        if 'user_id' not in df.columns:
            continue

        if SELECTED_COLUMNS:
            df = df[['user_id'] + SELECTED_COLUMNS]

        # Winsorize all columns except 'user_id'
        # (clips the extreme 5% tails of each signal to tame outliers)
        for col in df.columns:
            if col != 'user_id':
                df[col] = winsorize(df[col], limits=[0.05, 0.05])

        # Process each user separately
        for user_id, user_data in df.groupby('user_id'):
            # Require enough rows for MIN_SAMPLES_PER_CLASS windows.
            if len(user_data) < MIN_SAMPLES_PER_CLASS + TIMESTEPS:
                continue

            # Create sliding windows first (before any splitting)
            X = user_data.drop(columns=["user_id"]).values
            y = np.array([user_id] * len(X))

            X_windows = []
            for i in range(len(X) - TIMESTEPS + 1):
                X_windows.append(X[i:i + TIMESTEPS])

            if len(X_windows) > MAX_SAMPLES_PER_CLASS:
                # Limit windows if needed
                indices = np.random.choice(len(X_windows), MAX_SAMPLES_PER_CLASS, replace=False)
                X_windows = [X_windows[i] for i in indices]

            # NOTE(review): a user_id appearing in more than one file
            # overwrites that user's earlier windows here — confirm ids are
            # unique per file or concatenate instead.
            windows_by_user[user_id] = np.array(X_windows)

    # Now split the windows by user
    train_X, val_X, test_X = [], [], []
    train_y, val_y, test_y = [], [], []

    for user_id, windows in windows_by_user.items():
        # Create indices for splitting (NOT splitting the raw data)
        indices = np.random.permutation(len(windows))
        # split1 bounds train+val (0.8); split2 bounds train (0.8-0.2 = 0.6).
        split1 = int(TRAIN_RATIO * len(windows))
        split2 = int((TRAIN_RATIO - VAL_RATIO) * len(windows))

        train_indices = indices[:split2]
        val_indices = indices[split2:split1]
        test_indices = indices[split1:]

        # Use these indices to split the windows
        train_X.append(windows[train_indices])
        train_y.extend([user_id] * len(train_indices))

        val_X.append(windows[val_indices])
        val_y.extend([user_id] * len(val_indices))

        test_X.append(windows[test_indices])
        test_y.extend([user_id] * len(test_indices))

    # Combine all users' data
    # (raises if windows_by_user is empty — no user met the size criteria)
    train_X = np.vstack(train_X)
    val_X = np.vstack(val_X)
    test_X = np.vstack(test_X)

    return train_X, np.array(train_y), val_X, np.array(val_y), test_X, np.array(test_y)


def prepare_dataloaders3(batch_size=BATCH_SIZE):
    """Build DataLoaders from the window-first pipeline (load_and_split_data3).

    Features are standardized per timestep row (scaler fit on train only)
    and user_id labels are one-hot encoded (encoder fit on train only).

    Returns:
        (train_loader, val_loader, test_loader, num_classes)
    """
    # BUG FIX: this previously called load_and_split_data(), which returns
    # three DataFrames and cannot be unpacked into six window arrays.
    X_train, y_train, X_val, y_val, X_test, y_test = load_and_split_data3(DATA_PATH)

    # Flatten (n_windows, timesteps, features) -> (rows, features) so the
    # scaler standardizes each feature column, then restore each split's
    # own window shape (captured per split, not borrowed from train).
    train_shape, val_shape, test_shape = X_train.shape, X_val.shape, X_test.shape
    scaler = StandardScaler()
    X_train = scaler.fit_transform(X_train.reshape(-1, train_shape[-1])).reshape(train_shape)
    X_val = scaler.transform(X_val.reshape(-1, val_shape[-1])).reshape(val_shape)
    X_test = scaler.transform(X_test.reshape(-1, test_shape[-1])).reshape(test_shape)

    # One-hot encode labels; fitting on train fixes the class column order.
    encoder = OneHotEncoder(sparse_output=False)
    y_train = encoder.fit_transform(y_train.reshape(-1, 1))
    y_val = encoder.transform(y_val.reshape(-1, 1))
    y_test = encoder.transform(y_test.reshape(-1, 1))

    # Create datasets and dataloaders
    train_tensor = TensorDataset(
        torch.tensor(X_train, dtype=torch.float32),
        torch.tensor(y_train, dtype=torch.float32)
    )
    val_tensor = TensorDataset(
        torch.tensor(X_val, dtype=torch.float32),
        torch.tensor(y_val, dtype=torch.float32)
    )
    test_tensor = TensorDataset(
        torch.tensor(X_test, dtype=torch.float32),
        torch.tensor(y_test, dtype=torch.float32)
    )

    # drop_last keeps batch shapes fixed for train/val; test keeps all samples.
    train_loader = DataLoader(train_tensor, batch_size=batch_size, shuffle=True, drop_last=True)
    val_loader = DataLoader(val_tensor, batch_size=batch_size, shuffle=False, drop_last=True)
    test_loader = DataLoader(test_tensor, batch_size=batch_size, shuffle=False)

    num_classes = y_train.shape[1]
    return train_loader, val_loader, test_loader, num_classes