# summarize_results.py
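# Parses experiment logs into DataFrames, emits LaTeX table rows per strategy,
# and runs a multi-subject voting analysis over saved prediction pickles.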
import re
import os
import glob
import pickle
import numpy as np
import pandas as pd
from sklearn.metrics import roc_auc_score, accuracy_score, f1_score
LOG_DIR = './server_logs/'
ARCHIVE_DIR = 'results_all/archive/'  # prediction pickles for the voting analysis
# Define all the experiments
MODELS_ORDER = ['LDA', 'LR', 'MLP', 'EEGNet', 'EEGPT']
STRATEGIES = ['single_subject', 'cross_subject', 'subject_adapted']
EXPERIMENTS = ['relevance']  # full set: ['relevance', 'task', 'joint']
TASKS_ORDER = [
'facecat/female', 'facecat/male', 'facecat/blond', 'facecat/darkhaired',
'facecat/smiles', 'facecat/nosmile', 'facecat/old', 'facecat/young'
]
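# Log line formats assumed by the parsers below (see the sample RESULT line
# quoted inside load_all_summaries):
#   SUMMARY ... task_id:<t>, ... acc:<m> +/- <s> ... auc:<m> +/- <s> ... f1:<m> +/- <s>
#   RESULT, exp:<e>, strat:<s>, task_id:<t>, subject:<n>, model:<M>, acc:<a>, auc:<u>, f1:<f>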
def parse_summary_line(line):
"""
Uses regex to pull mean/std from a SUMMARY line.
"""
metrics = {}
try:
metrics['acc_mean'] = float(re.search(r"acc:([\d\.]+) \+\/-", line).group(1))
metrics['acc_std'] = float(re.search(r"acc:[\d\.]+ \+\/- ([\d\.]+)", line).group(1))
metrics['auc_mean'] = float(re.search(r"auc:([\d\.]+) \+\/-", line).group(1))
metrics['auc_std'] = float(re.search(r"auc:[\d\.]+ \+\/- ([\d\.]+)", line).group(1))
metrics['f1_mean'] = float(re.search(r"f1:([\d\.]+) \+\/-", line).group(1))
metrics['f1_std'] = float(re.search(r"f1:[\d\.]+ \+\/- ([\d\.]+)", line).group(1))
metrics['task'] = int(re.search(r"task_id:(\d+),", line).group(1))
except Exception:
        return None  # Malformed or partial line; the caller skips it
return metrics
def parse_result_line(line):
"""
Uses regex to pull mean/std from a RESULT line.
"""
metrics = {}
try:
metrics['acc_mean'] = float(re.search(r"acc:([\d\.]+)", line).group(1))
metrics['auc_mean'] = float(re.search(r"auc:([\d\.]+)", line).group(1))
metrics['f1_mean'] = float(re.search(r"f1:([\d\.]+)", line).group(1))
metrics['task'] = int(re.search(r"task_id:(\d+),", line).group(1))
metrics['subject'] = int(re.search(r"subject:(\d+),", line).group(1))
except Exception:
        return None  # Malformed or partial line; the caller skips it
return metrics
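# Example, using the sample RESULT line quoted in load_all_summaries:
#   parse_result_line("RESULT, exp:relevance, strat:subject_adapted, task_id:5, "
#                     "subject:27, model:EEGNet, acc:0.8491, auc:0.8953, f1:0.7870")
#   -> {'acc_mean': 0.8491, 'auc_mean': 0.8953, 'f1_mean': 0.7870, 'task': 5, 'subject': 27}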
def load_all_summaries(prefixes=('A1', 'B1'), experiments=EXPERIMENTS, strategies=STRATEGIES, models=MODELS_ORDER):
    """
    Parses all log files and returns two DataFrames: one row per SUMMARY line
    and one row per per-subject RESULT line.
    """
    print(f"Loading all summaries for prefixes={list(prefixes)}...")
    results_list = []
    summary_list = []
    for prefix in prefixes:
for exp in experiments:
for strat in strategies:
for model in models:
                    # The 'relevance' logs carry one SUMMARY line per task plus per-subject RESULT lines
if exp == 'relevance':
log_file = f"log_{prefix}_{exp}_{strat}_{model}.log"
log_path = os.path.join(LOG_DIR, log_file)
summary = None
if os.path.exists(log_path):
with open(log_path, 'r') as f:
for line in reversed(f.readlines()):
if "SUMMARY" in line:
summary = parse_summary_line(line)
if summary is None:
continue
summary.update({
'experiment': exp,
'strategy': strat,
'model': model,
})
summary_list.append(summary)
# RESULT, exp:relevance, strat:subject_adapted, task_id:5, subject:27, model:EEGNet, acc:0.8491, auc:0.8953, f1:0.7870
if "RESULT" in line and "fold:" not in line:
result = parse_result_line(line)
if result is None:
continue
result.update({
'experiment': exp,
'strategy': strat,
'model': model,
})
results_list.append(result)
else:
# 'task' and 'joint' experiments
log_file = f"log_{prefix}_{exp}_{strat}_{model}.log"
log_path = os.path.join(LOG_DIR, log_file)
summary = None
if os.path.exists(log_path):
with open(log_path, 'r') as f:
for line in reversed(f.readlines()):
if line.startswith("SUMMARY"):
summary = parse_summary_line(line)
break
if summary:
summary.update({
'experiment': exp,
'strategy': strat,
'model': model,
'task': 'all_tasks_combined' # Special task name
})
summary_list.append(summary)
print(f"Found {len(summary_list)} summary lines, {len(results_list)} result lines.")
return pd.DataFrame(summary_list), pd.DataFrame(results_list)
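# Columns of the returned frames (per the parsers above):
#   summaries: acc/auc/f1 _mean and _std, task, experiment, strategy, model
#   results:   acc/auc/f1 (stored under *_mean), task, subject, experiment, strategy, model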
def format_table_rows(
df, results_df,
experiment,
strategy,
metric='auc',
precision=4,
models_order=MODELS_ORDER,
tasks_order=TASKS_ORDER
):
"""
Generates and prints only the inner LaTeX rows for a table.
It's robust to missing data (e.g., LDA for subject_adapted).
"""
print("\n" + "="*80)
print(f"Generating LaTeX Rows: Exp='{experiment}', Strat='{strategy}', Metric='{metric.upper()}'")
print("="*80 + "\n")
df_filt = df[(df['experiment'] == experiment) & (df['strategy'] == strategy)]
if df_filt.empty:
print(f"% No data found for: {experiment} / {strategy}")
return
metric_mean = f'{metric}_mean'
metric_std = f'{metric}_std'
if metric_mean not in df_filt.columns:
print(f"% No data for metric '{metric}' in {experiment} / {strategy}")
return
pivot = pd.pivot_table(
df_filt,
values=[metric_mean, metric_std],
index='task',
columns='model'
)
available_models = pivot[metric_mean].columns.tolist()
models_to_print = [m for m in models_order if m in available_models]
    if 'EEGPT' not in available_models:
        # EEGPT summaries can be missing here; its cells are recovered from
        # the per-subject RESULT lines in the body loop below.
        models_to_print += ['EEGPT']
tasks_to_print = tasks_order
task_means = pivot[metric_mean] # Shape: (n_tasks, n_models)
all_task_mean = task_means.mean(axis=0) # Shape: (n_models,)
all_task_std = task_means.std(axis=0)
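    # Note: these pivot-based aggregates only cover models with SUMMARY lines;
    # the footer below recomputes the mean/std from 'tmp', which also includes
    # cells recovered from results_df.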
latex_str = []
# Header
col_str = " & ".join(models_to_print)
latex_str.append(f"% Table for {experiment} / {strategy} / {metric}")
latex_str.append(f"% Columns: Task & {col_str} \\\\")
latex_str.append("\\midrule")
tmp = {}
# Body
for tid, task_name in enumerate(tasks_to_print):
# Shorten the task name, e.g., 'facecat/blond' -> 'Blond'
short_name = task_name.split('/')[-1].title()
row = f"{short_name:15s} & "
cells = []
for model in models_to_print:
if (metric_mean, model) not in pivot.columns:
# try to recover from results_df
results_filt = results_df[
(results_df['experiment'] == experiment) &
(results_df['strategy'] == strategy) &
(results_df['model'] == model) &
(results_df['task'] == tid)
]
                # average across all subjects for this task; std is across subjects
mean_val = results_filt[metric_mean].mean()
std_val = results_filt[metric_mean].std()
else:
mean_val = pivot.loc[tid][(metric_mean, model)]
std_val = pivot.loc[tid][(metric_std, model)]
tmp[(tid, model)] = (mean_val, std_val)
cell_str = f"${mean_val:.{precision}f} \\pm {std_val:.{precision}f}$"
cells.append(cell_str)
row += " & ".join(cells) + " \\\\"
latex_str.append(row)
# Footer (All-Task Mean)
latex_str.append("\\midrule")
row = f"{'Mean':15s} & "
cells = []
    for model in models_to_print:
        # Prefer the per-cell values in 'tmp' (they include cells recovered from
        # results_df); the pivot-based aggregate is printed for comparison.
        mean_val = all_task_mean.get(model, np.nan)
        std_val = all_task_std.get(model, np.nan)  # std of the per-task means
        tmp_val = np.array([tmp[(tid, model)][0] for tid in range(len(tasks_to_print))])
        tmp_mean = np.mean(tmp_val)
        tmp_std = np.std(tmp_val)
        print(f"{model}: {tmp_mean:.4f} +/- {tmp_std:.4f} (pivot: {mean_val:.4f} +/- {std_val:.4f}, n={tmp_val.shape[0]})")
        mean_val, std_val = tmp_mean, tmp_std
cell_str = f"${mean_val:.{precision}f} \\pm {std_val:.{precision}f}$"
cells.append(cell_str)
row += " & ".join(cells) + " \\\\"
latex_str.append(row)
print("\n".join(latex_str))
def run_voting_analysis(
    model_name='LDA',
    experiment_name='relevance',
    strategy_name='cross_subject',
    tasks=TASKS_ORDER,
    archive_dir=ARCHIVE_DIR
):
"""
Loads all pickle files for a model and performs multi-subject voting.
This is designed for 'cross_subject' data, where predictions
for each subject are independent.
"""
print("\n" + "="*80)
print(f"Running Voting Analysis: Model='{model_name}', Strat='{strategy_name}'")
print("="*80)
all_predictions = []
    # Scan every pickle in the archive and keep those matching this configuration.
    for pkl_path in glob.glob(os.path.join(archive_dir, "*.pkl")):
        # Parse the configuration out of the filename, e.g.:
        #   results_A1_relevance_cross_subject_EEGPT_task0.pkl
        fname = os.path.basename(pkl_path)
        experiment = next((e for e in EXPERIMENTS if e in fname), None)
        strategy = next((s for s in STRATEGIES if s in fname), None)
        model = next((m for m in MODELS_ORDER if m in fname), None)
        task_id = next((t for t in range(len(tasks)) if f"task{t}" in fname), None)
        if None in (experiment, strategy, model, task_id):
            continue  # filename does not encode a known configuration
        if (model, experiment, strategy) != (model_name, experiment_name, strategy_name):
            continue
        with open(pkl_path, 'rb') as f:
            results = pickle.load(f)
# results.values() = (y_prob, test_mask, ids_test, (acc, auc, f1))
for key, (y_prob, _, ids_test, _) in results.items():
# key is test_subject_id
for i in range(len(y_prob)):
prob_class_1 = y_prob[i, 1]
true_label = ids_test[i, 3]
image_idx = ids_test[i, 4]
all_predictions.append({
'task_id': task_id,
'image_idx': image_idx,
'true_label': true_label,
'y_prob_1': prob_class_1,
'test_subject': key
})
if not all_predictions:
print("No predictions found. Aborting voting analysis.")
return
df = pd.DataFrame(all_predictions)
print(f"Loaded {len(df)} single-trial predictions from {model}...")
# Group by unique image and average probabilities
voted_df = df.groupby(['task_id', 'image_idx']).agg(
voted_prob=('y_prob_1', 'mean'), # Average the probabilities
true_label=('true_label', 'first'),
n_votes=('true_label', 'count')
)
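    # Soft voting: each image's class-1 probability is averaged over every test
    # subject that saw it; n_votes records how many subjects contributed.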
print(f"Aggregated into {len(voted_df)} unique image-task pairs.")
# Calculate final voted metrics
y_true = voted_df['true_label']
y_prob = voted_df['voted_prob']
y_pred = (y_prob > 0.5).astype(int)
voted_auc = roc_auc_score(y_true, y_prob)
voted_acc = accuracy_score(y_true, y_pred)
voted_f1 = f1_score(y_true, y_pred, average='macro')
print("\n--- Voting Results ---")
print(f"Model: {model}")
print(f"Strategy: {strategy} (Multi-Subject Voted)")
print(f"Voted AUC: {voted_auc:.4f}")
print(f"Voted Acc: {voted_acc:.4f}")
print(f"Voted F1: {voted_f1:.4f}")
    def bootstrap_metric(y_true, y_prob, metric, n_bootstraps=1000, random_state=42):
        """Resample trials with replacement; return the metric's mean and std."""
        rng = np.random.default_rng(random_state)
        results = []
        y_prob = np.array(y_prob)
        y_true = np.array(y_true)
        for _ in range(n_bootstraps):
            idx = rng.choice(len(y_true), size=len(y_true), replace=True)
            y_true_boot = y_true[idx]
            y_prob_boot = y_prob[idx]
if metric == 'auc':
score = roc_auc_score(y_true_boot, y_prob_boot)
elif metric == 'acc':
y_pred_boot = (y_prob_boot > 0.5).astype(int)
score = accuracy_score(y_true_boot, y_pred_boot)
elif metric == 'f1':
y_pred_boot = (y_prob_boot > 0.5).astype(int)
score = f1_score(y_true_boot, y_pred_boot, average='macro')
results.append(score)
return np.mean(results), np.std(results)
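    # With enough resamples, the returned std approximates the standard error
    # of the metric over the voted trials.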
# print by task
task_metrics = {}
for task_id, task_name in enumerate(tasks):
task_df = voted_df[voted_df.index.get_level_values('task_id') == task_id]
y_true = task_df['true_label']
y_prob = task_df['voted_prob']
task_auc, task_auc_std = bootstrap_metric(y_true, y_prob, 'auc')
task_acc, task_acc_std = bootstrap_metric(y_true, y_prob, 'acc')
task_f1, task_f1_std = bootstrap_metric(y_true, y_prob, 'f1')
print(f"\nTask {task_id}: {task_name:<20s}, AUC: {task_auc:.4f} +/- {task_auc_std:.4f}, Acc: {task_acc:.4f} +/- {task_acc_std:.4f}, F1: {task_f1:.4f} +/- {task_f1_std:.4f}")
task_metrics[task_id] = {'auc': task_auc, 'acc': task_acc, 'f1': task_f1, 'task': task_id, 'auc_std': task_auc_std, 'acc_std': task_acc_std, 'f1_std': task_f1_std}
return voted_df, {'auc': voted_auc, 'acc': voted_acc, 'f1': voted_f1}, task_metrics
# --- Main Execution ---
if __name__ == '__main__':
master_df, results_df = load_all_summaries()
print("\n--- Master DataFrame Head ---")
print(master_df.head())
# --- Single Subject ---
format_table_rows(
master_df, results_df,
experiment='relevance',
strategy='single_subject',
metric='auc'
)
# --- Cross Subject ---
format_table_rows(
master_df, results_df,
experiment='relevance',
strategy='cross_subject',
metric='auc'
)
# --- Subject Adapted ---
format_table_rows(
master_df, results_df,
experiment='relevance',
strategy='subject_adapted',
metric='auc'
)
# Run the voting analysis
print("\n--- Running Voting Analysis ---")
all_voted_results = []
all_task_metrics = {}
    for model in MODELS_ORDER:
        voted_df, voted_metrics, task_metrics = run_voting_analysis(
            model_name=model,
            strategy_name='cross_subject'
        )
        voted_metrics['model'] = model
        all_voted_results.append(voted_metrics)
        all_task_metrics[model] = task_metrics
print("\n--- Final Voting Summary Table ---")
voted_summary_df = pd.DataFrame(all_voted_results).set_index('model')
print(voted_summary_df)
print("\n--- Final Voting Task Table AUC latex table ---")
for task_id, task_name in enumerate(TASKS_ORDER):
task_df = pd.DataFrame(all_task_metrics).T
line = f"{task_name.split('/')[-1].title():<20s} "
for model in MODELS_ORDER:
if model in task_df.index:
mean_val = task_df.loc[model, task_id]['auc']
std_val = task_df.loc[model, task_id]['auc_std']
line += f"& {mean_val:.4f} $\\pm$ {std_val:.4f}"
else:
line += "& "
line += " \\\\"
print(line)
print("\\midrule")
line = f"{'Mean':<20s} "
for model in MODELS_ORDER:
if model in task_df.index:
tmp_val = [task_df.loc[model, task_id]['auc'] for task_id in range(len(TASKS_ORDER))]
mean_val = np.mean(tmp_val)
std_val = np.std(tmp_val)
line += f"& {mean_val:.4f} $\\pm$ {std_val:.4f}"
else:
line += "& "
line += " \\\\"
print(line)
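# Expected layout (assumed from the constants above): logs under ./server_logs/,
# prediction pickles under results_all/archive/. Run:  python summarize_results.py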