import os
import json
import pandas as pd
import numpy as np
from tqdm import tqdm
from datetime import datetime

# Constants
COMMON_SAMPLING_RATE = 4  # Hz - target sampling rate
SOURCE_DIR = "../EMOGNITION_filtered/"   # input root: one numeric folder per user with *_EMPATICA.json files
OUTPUT_DIR = "../PROCESSED_EMOGNITION/"  # output root: one user_<id>.csv per user
os.makedirs(OUTPUT_DIR, exist_ok=True)   # import-time side effect: make sure the output dir exists

# Define the emotion order
# Order in which stimuli were presented in the study protocol.
# NOTE(review): not referenced anywhere in this file — presumably consumed
# by a later processing stage; confirm before removing.
EMOTION_ORDER = [
    "BASELINE",  # No washout phase
    "NEUTRAL",
    "AWE",
    "DISGUST",
    "SURPRISE",
    "ANGER",
    "ENTHUSIASM",
    "LIKING",
    "FEAR",
    "AMUSEMENT",
    "SADNESS"
]


def parse_timestamp(ts_str):
    """Parse a timestamp like ``2020-07-16T10:03:19:005513`` into Unix seconds.

    Returns ``None`` (after printing a warning) when the string does not
    match the expected format. The parsed datetime is naive, so the
    resulting epoch value is interpreted in the local timezone.
    """
    try:
        # Note the unusual ':' separator before the microseconds field.
        parsed = datetime.strptime(ts_str, "%Y-%m-%dT%H:%M:%S:%f")
    except ValueError:
        print(f"Warning: Could not parse timestamp {ts_str}")
        return None
    return parsed.timestamp()


def extract_data_from_json(file_path):
    """Extract Empatica sensor streams from one session JSON file.

    Parameters
    ----------
    file_path : str
        Path to a ``*_EMPATICA.json`` file whose top-level keys may include
        "BVP", "EDA", "TEMP" and "ACC", each a list of
        ``[timestamp_str, value, ...]`` entries.

    Returns
    -------
    dict
        Maps measurement name ("BVP", "GSR_GSR", "TEMP_Temp", "ACC_x/y/z")
        to ``{"timestamps": [...], "values": [...]}``. Entries whose
        timestamp cannot be parsed are skipped.
    """
    with open(file_path, 'r') as f:
        data = json.load(f)

    result = {}

    # Single-channel streams share identical parsing logic; the output names
    # are renamed to match the column naming used elsewhere in the pipeline.
    single_channels = (
        ("BVP", "BVP"),
        ("EDA", "GSR_GSR"),
        ("TEMP", "TEMP_Temp"),
    )
    for source_key, out_name in single_channels:
        if source_key in data:
            result[out_name] = _collect_single_channel(data[source_key])

    # ACC is the only multi-channel stream: each entry carries x, y, z.
    if "ACC" in data:
        timestamps = []
        x_values = []
        y_values = []
        z_values = []
        for entry in data["ACC"]:
            ts = parse_timestamp(entry[0])
            if ts is not None:
                timestamps.append(ts)
                x_values.append(entry[1])
                y_values.append(entry[2])
                z_values.append(entry[3])

        # The three axis series intentionally share one timestamps list.
        result["ACC_x"] = {"timestamps": timestamps, "values": x_values}
        result["ACC_y"] = {"timestamps": timestamps, "values": y_values}
        result["ACC_z"] = {"timestamps": timestamps, "values": z_values}

    return result


def _collect_single_channel(entries):
    """Parse ``[timestamp_str, value]`` pairs, skipping unparseable timestamps."""
    timestamps = []
    values = []
    for entry in entries:
        ts = parse_timestamp(entry[0])
        if ts is not None:
            timestamps.append(ts)
            values.append(entry[1])
    return {"timestamps": timestamps, "values": values}


def resample_signal(timestamps, values, target_rate):
    """Resample an irregularly-sampled signal onto a uniform time grid.

    Parameters
    ----------
    timestamps : list[float]
        Unix timestamps in seconds (not necessarily sorted).
    values : list
        Signal values, same length as ``timestamps``.
    target_rate : float
        Desired output rate in Hz.

    Returns
    -------
    pd.DataFrame
        Columns ``["timestamp", "value"]`` on a uniform grid from the min to
        the max input timestamp at ``1/target_rate`` spacing. When
        downsampling, each grid point holds the mean of samples in
        ``[t, t + step)`` (NaN for empty bins; the final grid point is always
        NaN, matching the historical binning loop). When upsampling, values
        are filled by pandas' default (positional linear) interpolation.
        Empty input yields an empty DataFrame.
    """
    if not timestamps or not values:
        return pd.DataFrame()

    # Sort by time and index on the timestamp.
    df = pd.DataFrame({"timestamp": timestamps, "value": values})
    df.sort_values("timestamp", inplace=True)
    df.set_index("timestamp", inplace=True)

    min_time = df.index.min()
    max_time = df.index.max()
    step = 1 / target_rate
    target_timestamps = np.arange(min_time, max_time + step, step)

    # Approximate the source rate from the sample count over the covered span.
    original_rate = len(timestamps) / (max_time - min_time) if max_time > min_time else target_rate

    if target_rate < original_rate:
        # Downsample by bin-averaging. Vectorized with searchsorted/bincount
        # instead of one boolean mask over the whole frame per bin (the
        # previous implementation was O(bins * n)).
        ts = df.index.to_numpy()
        vals = df["value"].to_numpy(dtype=float)
        n_bins = len(target_timestamps) - 1

        # Sample x lands in bin i iff target[i] <= x < target[i+1];
        # side="right" makes a sample exactly on a grid point open its bin.
        bin_idx = np.searchsorted(target_timestamps, ts, side="right") - 1
        in_range = (bin_idx >= 0) & (bin_idx < n_bins)

        counts = np.bincount(bin_idx[in_range], minlength=n_bins)
        totals = np.bincount(bin_idx[in_range], weights=vals[in_range], minlength=n_bins)

        # Empty bins (and the trailing grid point) stay NaN.
        means = np.full(len(target_timestamps), np.nan)
        np.divide(totals, counts, out=means[:n_bins], where=counts > 0)

        result_df = pd.DataFrame({"timestamp": target_timestamps, "value": means})
    else:
        # Upsampling or same rate: reindex onto the grid and interpolate.
        # NOTE(review): reindex raises on duplicate timestamps — pre-existing
        # behavior, deliberately left unchanged.
        reindexed = df.reindex(target_timestamps).interpolate()
        result_df = pd.DataFrame({
            "timestamp": target_timestamps,
            "value": reindexed["value"].values
        })

    return result_df


def process_user(user_id):
    """Build one merged, resampled DataFrame for a single user.

    Reads every ``*_EMPATICA.json`` file in the user's source directory,
    resamples each extracted signal to ``COMMON_SAMPLING_RATE``, and merges
    all signals into one wide DataFrame (one column per measurement).

    Returns the merged DataFrame without its timestamp column, or ``None``
    when the directory is missing or no data could be processed.
    """
    print(f"Processing user: {user_id}")
    user_dir = os.path.join(SOURCE_DIR, user_id)

    if not os.path.isdir(user_dir):
        print(f"Directory not found for user {user_id}")
        return None

    # Get all JSON files for this user
    json_files = [f for f in os.listdir(user_dir) if f.endswith("_EMPATICA.json")]

    # Dictionary to store all measurements
    # Keys mirror the measurement names produced by extract_data_from_json.
    all_measurements = {
        "BVP": [],
        "GSR_GSR": [],
        "TEMP_Temp": [],
        "ACC_x": [],
        "ACC_y": [],
        "ACC_z": []
    }

    # Process all JSON files
    for json_file in json_files:
        file_path = os.path.join(user_dir, json_file)
        data = extract_data_from_json(file_path)

        # Resample each measurement and store
        for measure_name, measure_data in data.items():
            resampled = resample_signal(
                measure_data["timestamps"],
                measure_data["values"],
                COMMON_SAMPLING_RATE
            )
            if not resampled.empty:
                all_measurements[measure_name].append(resampled)

    # Merge all measurements by concatenating their dataframes
    merged_data = pd.DataFrame()

    for measure_name, dfs in all_measurements.items():
        if dfs:
            # Concatenate all dataframes for this measurement
            measure_df = pd.concat(dfs, ignore_index=True)

            # Sort by timestamp
            measure_df = measure_df.sort_values("timestamp").reset_index(drop=True)

            # If this is the first measurement, use it as the base for merging
            # NOTE(review): the first non-empty measurement's timestamp grid
            # defines the output rows; every later measurement is snapped onto
            # it via merge_asof(direction="nearest"), so the row count follows
            # whichever measurement happens to come first in dict order.
            if merged_data.empty:
                merged_data = pd.DataFrame({"timestamp": measure_df["timestamp"]})
                merged_data[measure_name] = measure_df["value"]
            else:
                # Otherwise, merge with existing data
                temp_df = pd.DataFrame({
                    "timestamp": measure_df["timestamp"],
                    measure_name: measure_df["value"]
                })
                merged_data = pd.merge_asof(
                    merged_data.sort_values("timestamp"),
                    temp_df.sort_values("timestamp"),
                    on="timestamp",
                    direction="nearest"
                )

    if merged_data.empty:
        print(f"No data processed for user {user_id}")
        return None

    # Add user_id column
    # Assumes user directory names are numeric (enforced by main()'s isdigit filter).
    merged_data["user_id"] = int(user_id)

    # Remove timestamp column before saving
    merged_no_timestamp = merged_data.drop(columns=["timestamp"])

    # Remove first and last row for consistency with other datasets
    if len(merged_no_timestamp) > 2:
        merged_no_timestamp = merged_no_timestamp.iloc[1:-1].reset_index(drop=True)

    return merged_no_timestamp


def main():
    """Process every numeric user directory under SOURCE_DIR and save one CSV per user."""
    # Only directory entries whose names are all digits are treated as user IDs.
    user_dirs = [
        entry for entry in os.listdir(SOURCE_DIR)
        if entry.isdigit() and os.path.isdir(os.path.join(SOURCE_DIR, entry))
    ]

    for user_id in tqdm(user_dirs, desc="Processing users"):
        merged_data = process_user(user_id)
        if merged_data is None or merged_data.empty:
            continue
        output_path = os.path.join(OUTPUT_DIR, f"user_{user_id}.csv")
        merged_data.to_csv(output_path, index=False)
        print(f"Saved processed data for user {user_id} to {output_path}")


# Entry point: run only when executed as a script, not when imported.
if __name__ == "__main__":
    main()