import os

import pandas as pd
import numpy as np
import torch
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
import joblib

def lstm_dataloader_slide_window(file_path='./load.csv', window_size=10, stride=1, test_size=0.2):
    """Load a CSV, build sliding-window samples per ID, and return train/test tensors.

    The CSV is expected to contain 'ID', 'time', 'Y' (label) and 'S' columns;
    every other column is treated as a feature and min-max normalized across
    the whole file. Windows are cut per 'ID' group so sequences from different
    entities never mix. Groups shorter than ``window_size`` yield a single
    zero-padded window instead of being silently dropped.

    :param file_path: path to the input CSV file.
    :param window_size: length of each sliding window, in time steps.
    :param stride: step between consecutive window starts (stride=1 makes
        adjacent windows overlap by ``window_size - 1`` steps).
    :param test_size: fraction of windows held out for the test split.
    :return: ``(train_features, test_features, train_labels, test_labels)``
        as float32 tensors of shape ``(num_windows, window_size, num_features)``
        for features and ``(num_windows, window_size, 1)`` for labels.
    """
    data = pd.read_csv(file_path)

    # Everything except the identifier/time/label/status columns is a feature.
    feature_columns = data.columns.difference(['ID', 'time', 'Y', 'S'])
    data_features = data[feature_columns]

    # NOTE(review): the scaler is fit on the full dataset before the
    # train/test split, so test-set statistics leak into normalization —
    # acceptable for a quick loader, but confirm this is intended.
    scaler = MinMaxScaler()
    scaler.fit(data_features)

    # Persist the fitted scaler so inference can apply the same scaling.
    joblib.dump(scaler, 'scaler.pkl')

    # Bug fix: reuse the already-fitted scaler. The original called
    # fit_transform here, fitting a second time for no reason.
    data_features_normalized = pd.DataFrame(scaler.transform(data_features),
                                            columns=feature_columns)

    # Reassemble the data with 'ID', 'time', 'Y', 'S', and normalized features.
    data_normalized = pd.concat([data[['ID', 'time', 'Y', 'S']].reset_index(drop=True),
                                 data_features_normalized], axis=1)

    # Build overlapping windows of features and labels, one group per 'ID'.
    feature_windows = []
    label_windows = []

    for _, group in data_normalized.groupby('ID'):
        features = group[feature_columns].values  # only normalized features
        labels = group[['Y']].values              # 'Y' as label

        # Bug fix: the original range produced no window at all for groups
        # shorter than window_size, so the padding branch below was dead code
        # and short sequences were silently discarded. max(..., 0) guarantees
        # at least one (zero-padded) window per group while producing exactly
        # the same windows as before for groups of length >= window_size.
        for i in range(0, max(len(features) - window_size, 0) + 1, stride):
            windowed_features = features[i:i + window_size]
            windowed_labels = labels[i:i + window_size]

            # Zero-pad the single short window of an undersized group.
            if len(windowed_features) < window_size:
                pad = window_size - len(windowed_features)
                windowed_features = np.pad(windowed_features, ((0, pad), (0, 0)), 'constant')
                windowed_labels = np.pad(windowed_labels, ((0, pad), (0, 0)), 'constant')

            feature_windows.append(windowed_features)
            label_windows.append(windowed_labels)

    # Stack the window lists once, then convert to float32 tensors.
    features_tensor = torch.tensor(np.array(feature_windows), dtype=torch.float32)
    labels_tensor = torch.tensor(np.array(label_windows), dtype=torch.float32)

    # Fixed seed keeps the split reproducible across runs.
    train_features, test_features, train_labels, test_labels = train_test_split(
        features_tensor, labels_tensor, test_size=test_size, random_state=42
    )

    # Verify the shapes of train and test sets.
    print("Train Features Shape:", train_features.shape)
    print("Train Labels Shape:", train_labels.shape)
    print("Test Features Shape:", test_features.shape)
    print("Test Labels Shape:", test_labels.shape)

    return train_features, test_features, train_labels, test_labels

# Usage:
# train_features, test_features, train_labels, test_labels = lstm_dataloader_slide_window(file_path='load1.csv', window_size=10, stride=1, test_size=0.2)
