import os
import sys
import pandas as pd
import numpy as np
from tqdm import tqdm

# Constants
COMMON_SAMPLING_RATE = 4  # Hz - target sampling rate
SOURCE_DIR = "../UBFC/"  # raw UBFC dataset root; expects one "s<N>" folder per user
OUTPUT_DIR = "../PROCESSED_UBFC/"  # destination for the per-user processed CSVs
os.makedirs(OUTPUT_DIR, exist_ok=True)  # created eagerly at import time so later writes can't fail on a missing dir


def read_and_concat_csv_files(base_path, user_id, signal_type):
    """Read and vertically concatenate the T1, T2, T3 trial files for one signal.

    Looks for files named ``{signal_type}_s{user_id}_T{trial}.csv`` (trials
    1-3) under *base_path*, reads each without a header row, and stacks them
    in trial order.

    Args:
        base_path: Directory containing the per-trial CSV files.
        user_id: User identifier (numeric part, without the 's' prefix).
        signal_type: Signal file prefix, e.g. "bvp" or "eda".

    Returns:
        A single concatenated DataFrame with a fresh integer index, or an
        empty DataFrame if no trial file contained data.
    """
    dfs = []

    for trial in (1, 2, 3):
        file_path = os.path.join(base_path, f"{signal_type}_s{user_id}_T{trial}.csv")
        if not os.path.exists(file_path):
            print(f"Warning: {file_path} does not exist")
            continue

        try:
            df = pd.read_csv(file_path, header=None)
        except pd.errors.EmptyDataError:
            # A zero-byte file makes read_csv raise rather than return an
            # empty frame, so the df.empty check alone never fires for it;
            # treat it the same as an empty trial.
            print(f"Warning: {file_path} is empty")
            continue

        if df.empty:
            print(f"Warning: {file_path} is empty")
        else:
            dfs.append(df)

    if not dfs:
        return pd.DataFrame()

    return pd.concat(dfs, ignore_index=True)


def resample_signal(data, original_rate, target_rate, signal_type=""):
    """Resample a single-column signal from original_rate to target_rate (Hz).

    Downsampling (target < original) averages the source samples falling in
    each target-rate bin; upsampling (target >= original) reindexes onto the
    target grid and linearly interpolates the gaps.

    Args:
        data: Single-column DataFrame (column 0 holds the samples). The
            input is NOT modified.
        original_rate: Sampling rate of *data*, in Hz.
        target_rate: Desired output rate, in Hz.
        signal_type: Unused label, kept for interface compatibility.

    Returns:
        DataFrame with columns ["timestamp", "value"]; on the downsampling
        path the final grid point carries NaN. Empty DataFrame if *data*
        is empty.
    """
    if data.empty:
        return pd.DataFrame()

    # Work on a copy so the caller's DataFrame is not mutated (the original
    # code added a "timestamp" column and set the index in place on the
    # caller's object).
    data = data.copy()

    # arange(len(data)) / rate avoids the float-step form
    # arange(0, n/rate, 1/rate), whose length can differ from len(data)
    # due to floating-point rounding of the stop/step values.
    data["timestamp"] = np.arange(len(data)) / original_rate
    data.set_index("timestamp", inplace=True)

    # Target grid covers [0, max_timestamp] at the requested rate.
    target_timestamps = np.arange(0, data.index.max() + (1 / target_rate), 1 / target_rate)

    if target_rate < original_rate:
        # Downsample: average all source samples in [t, t + 1/target_rate).
        rows = []
        for i in range(len(target_timestamps) - 1):
            t_start = target_timestamps[i]
            t_end = target_timestamps[i + 1]
            subset = data.loc[(data.index >= t_start) & (data.index < t_end)]
            rows.append([t_start, subset[0].mean() if not subset.empty else np.nan])

        result_df = pd.DataFrame(rows, columns=["timestamp", "value"])

        # Keep the final grid point (as NaN) so the output spans the full
        # grid; downstream processing trims the first/last rows anyway.
        if len(target_timestamps) > 0:
            result_df.loc[len(result_df)] = [target_timestamps[-1], np.nan]
    else:
        # Upsample or same rate: reindex onto the target grid and linearly
        # interpolate the introduced NaNs.
        reindexed = data.reindex(target_timestamps).interpolate()
        result_df = pd.DataFrame({
            "timestamp": target_timestamps,
            "value": reindexed[0].values,
        })

    return result_df


def process_user(user_dir, user_id):
    """Load, resample, and merge one user's BVP and EDA signals.

    Reads the per-trial BVP (64 Hz) and EDA (4 Hz) files, resamples both
    to COMMON_SAMPLING_RATE, and joins them on the shared timestamp grid.

    Returns:
        DataFrame with columns BVP, GSR_GSR and user_id (timestamp dropped,
        first/last rows trimmed), or None when no usable data exists.
    """
    print(f"Processing user: s{user_id}")

    # Load and concatenate the trial files for each signal.
    bvp_raw = read_and_concat_csv_files(user_dir, user_id, "bvp")
    eda_raw = read_and_concat_csv_files(user_dir, user_id, "eda")

    if bvp_raw.empty and eda_raw.empty:
        print(f"No data found for user s{user_id}")
        return None

    # Bring both signals onto the common sampling grid
    # (BVP downsampled from 64 Hz, EDA passed through at 4 Hz).
    bvp = resample_signal(bvp_raw, 64, COMMON_SAMPLING_RATE)
    eda = resample_signal(eda_raw, 4, COMMON_SAMPLING_RATE, signal_type="eda")

    have_bvp = not bvp.empty
    have_eda = not eda.empty

    if not (have_bvp or have_eda):
        print(f"Both signals empty after resampling for user s{user_id}")
        return None

    # Align the available streams on timestamp.
    if have_bvp and have_eda:
        combined = pd.merge(bvp, eda, on="timestamp", how="outer", suffixes=('_bvp', '_eda'))
        combined.rename(columns={"value_bvp": "BVP", "value_eda": "GSR_GSR"}, inplace=True)
    elif have_bvp:
        combined = bvp.rename(columns={"value": "BVP"})
        combined["GSR_GSR"] = np.nan
    else:
        combined = eda.rename(columns={"value": "GSR_GSR"})
        combined["BVP"] = np.nan

    # Tag every row with the numeric user id.
    combined["user_id"] = int(user_id)

    # Timestamps are not persisted — only the signal columns are saved.
    combined = combined.drop(columns=["timestamp"])

    # Trim the first and last rows to mirror the ECSMP preprocessing.
    if len(combined) > 2:
        combined = combined.iloc[1:-1].reset_index(drop=True)

    return combined


def main():
    """Discover user folders under SOURCE_DIR and write one processed CSV per user."""
    # A user folder is named like "s3": an 's' followed by digits.
    user_folders = [
        (os.path.join(SOURCE_DIR, name), name[1:])
        for name in os.listdir(SOURCE_DIR)
        if os.path.isdir(os.path.join(SOURCE_DIR, name))
        and name.startswith('s')
        and name[1:].isdigit()
    ]

    for user_dir, user_id in tqdm(user_folders, desc="Processing users"):
        result = process_user(user_dir, user_id)
        if result is None or result.empty:
            continue
        output_path = os.path.join(OUTPUT_DIR, f"user_{user_id}.csv")
        result.to_csv(output_path, index=False)
        print(f"Saved processed data for user s{user_id} to {output_path}")


if __name__ == "__main__":
    main()