import pandas as pd

def aggregate_speculative_data(input_csv_path: str, output_csv_path: str) -> None:
    """
    Aggregate speculative decoding benchmark data from a CSV file.

    For each unique combination of (batch_size, speculative_num_draft_tokens,
    speculative_num_steps, top_k), computes the mean of every other numeric
    column plus the number of rows in the group, written as 'sampling_count'.

    Args:
        input_csv_path (str): Path to the input CSV file.
        output_csv_path (str): Path to save the aggregated output CSV file.

    Returns:
        None. Errors are reported via print() followed by an early return
        (best-effort script style; no exceptions propagate to the caller).
    """
    try:
        df = pd.read_csv(input_csv_path)
    except FileNotFoundError:
        print(f"Error: Input file '{input_csv_path}' not found.")
        return
    except pd.errors.EmptyDataError:
        print(f"Error: Input file '{input_csv_path}' is empty.")
        # Still produce an (empty) output file so downstream consumers
        # always find something at output_csv_path.
        pd.DataFrame().to_csv(output_csv_path, index=False)
        return
    except Exception as e:
        print(f"Error reading CSV file '{input_csv_path}': {e}")
        return

    grouping_columns = [
        'batch_size',
        'speculative_num_draft_tokens',
        'speculative_num_steps',
        'top_k',
    ]

    # All grouping keys must be present; bail out with a clear message otherwise.
    missing_group_cols = [col for col in grouping_columns if col not in df.columns]
    if missing_group_cols:
        print(f"Error: Missing crucial grouping columns in CSV: {missing_group_cols}")
        return

    # Every non-grouping column is treated as a metric to be averaged.
    metric_columns = [col for col in df.columns if col not in grouping_columns]

    # Coerce metrics to numeric; unparseable cells become NaN, which mean()
    # skips by default. (metric_columns comes straight from df.columns, so no
    # per-column existence re-check is needed.)
    for col in metric_columns:
        df[col] = pd.to_numeric(df[col], errors='coerce')

    valid_numeric_metrics = [
        col for col in metric_columns
        if pd.api.types.is_numeric_dtype(df[col])
    ]

    # Header-only CSV (columns but zero rows): emit an empty file that still
    # carries the expected output schema.
    if df.empty:
        print("Input DataFrame is empty. Outputting an empty aggregated file with expected columns.")
        output_cols_on_empty = grouping_columns + valid_numeric_metrics + ['sampling_count']
        pd.DataFrame(columns=output_cols_on_empty).to_csv(output_csv_path, index=False)
        return

    # --- Per-group row counts ---
    # dropna=False keeps groups whose key values contain NA (possible when
    # key cells are empty in the CSV). .size() yields a keyed Series;
    # reset_index turns it into a DataFrame with a 'sampling_count' column.
    try:
        grouped_counts = df.groupby(grouping_columns, dropna=False).size().reset_index(name='sampling_count')
    except Exception as e:
        print(f"Error during count aggregation: {e}")
        return

    # --- Per-group means (only if there is anything numeric to average) ---
    if valid_numeric_metrics:
        try:
            # as_index=False keeps grouping columns as regular columns.
            grouped_means = df.groupby(grouping_columns, as_index=False, dropna=False)[valid_numeric_metrics].mean()
        except Exception as e:
            print(f"Error during mean aggregation: {e}")
            return

        # An 'outer' merge keeps any group present on only one side (e.g. a
        # group whose metric values were all NaN); its missing metric cells
        # come out as NaN while the count is preserved.
        try:
            final_aggregated_df = pd.merge(grouped_means, grouped_counts, on=grouping_columns, how='outer')
        except Exception as e:
            print(f"Error merging means and counts: {e}")
            return
    else:
        print("Warning: No valid numeric metric columns found to average. Output will only contain grouping columns and counts.")
        final_aggregated_df = grouped_counts

    # Column order: grouping keys, then metrics in their original relative
    # order, then 'sampling_count' last.
    if not final_aggregated_df.empty:
        ordered_columns = grouping_columns + \
                          [m for m in valid_numeric_metrics if m in final_aggregated_df.columns]
        if 'sampling_count' in final_aggregated_df.columns:
            ordered_columns.append('sampling_count')
        # Guard against KeyErrors: only select columns that actually exist.
        final_aggregated_df = final_aggregated_df[
            [col for col in ordered_columns if col in final_aggregated_df.columns]
        ]

    try:
        final_aggregated_df.to_csv(output_csv_path, index=False, float_format='%.6f')
        print(f"Aggregated data saved to '{output_csv_path}'")
    except Exception as e:
        print(f"Error writing to output file '{output_csv_path}': {e}")

# Example Usage:
if __name__ == "__main__":

    source_csv = "/verl/outputs/timings_all_setting_rlhf.csv"
    result_csv = "/verl/outputs/test_analysis_performance.csv"

    # Run the aggregation end-to-end on the benchmark timings file.
    aggregate_speculative_data(source_csv, result_csv)

    # Echo the generated CSV so a run can be eyeballed for correctness;
    # the function may legitimately not create the file on error.
    print("\nContent of the output file:")
    try:
        with open(result_csv, "r") as f:
            print(f.read())
    except FileNotFoundError:
        print(f"Output file '{result_csv}' was not created.")
