# Import necessary libraries
import os
import numpy as np
import pandas as pd
from collections import Counter
from sklearn.preprocessing import StandardScaler
from sklearn.impute import SimpleImputer
from imblearn.over_sampling import RandomOverSampler

"""
This script is used to find and replace missing data in the dataset and then fix the imbalance between samples for different users.
"""
# -------------------------------
# Step 1: Load and Process Data (Handle Missing Values)
# -------------------------------
data_path = r"../PROCESSED_EMOGNITION"  # Original data directory
output_path = r"../PROCESSED_EMOGNITION"  # Directory to save processed data
# NOTE(review): input and output directories are the same, so re-running this
# script processes its own output files (and any file not already named
# user_<id>.csv leaves both the original and the renamed copy behind, which
# Step 2 would then count twice) — confirm this is intended.
os.makedirs(output_path, exist_ok=True)

files = [os.path.join(data_path, file) for file in os.listdir(data_path) if file.endswith(".csv")]

for file in files:
    print(f"Processing file: {file}")

    # Load data
    data = pd.read_csv(file)
    user_id = data.iloc[0, -1]  # Assuming the last column contains the user ID

    # Impute missing values in the feature columns only (all but the last
    # column, which holds the user ID), using the per-column mean.
    if data.isnull().values.any():
        print("Missing values found. Imputing missing values...")
        imputer = SimpleImputer(strategy='mean')
        data.iloc[:, :-1] = imputer.fit_transform(data.iloc[:, :-1])
    else:
        print("No missing values found.")

    # Every file is saved (imputed or not) under a uniform user_<id>.csv name
    # so Step 2 can rely on a consistent set of per-user files.
    # (Previously the no-missing-values branch printed "Skipping file save"
    # while still saving — the message contradicted the action.)
    output_file = os.path.join(output_path, f"user_{int(user_id)}.csv")
    print(f"Saving processed file: {output_file}")
    data.to_csv(output_file, index=False)


print("Step 1 complete: All individual files processed and saved if necessary.")

# -------------------------------
# Step 2: Combine All Files for Balancing
# -------------------------------
print("Combining all processed files for balancing...")

# Gather every per-user CSV produced in Step 1.
processed_files = [
    os.path.join(output_path, name)
    for name in os.listdir(output_path)
    if name.endswith(".csv")
]

all_data = []    # one feature matrix (ndarray) per user file
all_labels = []  # one user ID per file

for path in processed_files:
    frame = pd.read_csv(path)
    # Features are every column but the last; the last column is the user ID,
    # which is the same on every row of a file, so the first row's value is
    # taken as that file's label.
    all_data.append(frame.iloc[:, :-1].values)
    all_labels.append(frame.iloc[:, -1].values[0])

# Diagnostic pass: flag any feature matrix whose column count differs from
# the first file's (np.vstack below requires matching widths).
for i, data in enumerate(all_data):
    if data.shape[1] != all_data[0].shape[1]:
        print(f"Array {i} has a different shape: {data.shape}")
        print(data)

# Stack all per-user feature rows, and repeat each user ID once per row of
# that user's matrix so X and y stay aligned.
X = np.vstack(all_data)
y = np.hstack([[uid] * len(mat) for uid, mat in zip(all_labels, all_data)])

# -------------------------------
# Step 3: Save the Imputed Data
# -------------------------------
print("All processing complete. Files with imputed values have been saved to:", output_path)
print("No oversampling was performed as requested.")

# Optional: Generate a report of the processed files
print("\nProcessed files summary:")
csv_names = [name for name in os.listdir(output_path) if name.endswith(".csv")]
for name in csv_names:
    summary_df = pd.read_csv(os.path.join(output_path, name))
    # Last column holds the user ID (same on every row).
    uid = summary_df.iloc[0, -1]
    print(f"User {int(uid)}: {len(summary_df)} samples")

print("\nFurther analysis can now be performed on the processed data...")