Adding statistical tests, code to make the tiled images for the brain diffuser failed cases, and updating the data filtering criteria to match Prolific's guidance

#9
Files changed (1) hide show
  1. human_trials_mindeye2.ipynb +412 -0
human_trials_mindeye2.ipynb ADDED
@@ -0,0 +1,412 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "metadata": {},
7
+ "outputs": [],
8
+ "source": [
9
+ "import os, sys, shutil\n",
10
+ "from tqdm import tqdm\n",
11
+ "import numpy as np\n",
12
+ "import pandas as pd\n",
13
+ "import matplotlib as plt\n",
14
+ "from PIL import Image\n",
15
+ "from matplotlib.lines import Line2D\n",
16
+ "import matplotlib as mpl\n",
17
+ "import math\n",
18
+ "import matplotlib.image as mpimg\n",
19
+ "import random\n",
20
+ "from datetime import datetime\n",
21
+ "from torchvision import transforms\n",
22
+ "import torch\n",
23
+ "from scipy.stats import binom_test\n",
24
+ "# os.chdir(\"..\")\n",
25
+ "experiment_version = 4\n",
26
+ "os.makedirs(f\"stimuli_v{experiment_version}\", exist_ok=True)\n",
27
+ "os.makedirs(f\"responses_v{experiment_version}\", exist_ok=True)\n",
28
+ "os.makedirs(f\"dataframes_v{experiment_version}\", exist_ok=True)"
29
+ ]
30
+ },
31
+ {
32
+ "cell_type": "markdown",
33
+ "metadata": {},
34
+ "source": [
35
+ "# CREATE EXPERIMENT DATAFRAME AND TRIAL FILES FOR MEADOWS"
36
+ ]
37
+ },
38
+ {
39
+ "cell_type": "code",
40
+ "execution_count": null,
41
+ "metadata": {},
42
+ "outputs": [],
43
+ "source": [
44
+ "#Experiment column key:\n",
45
+ "# 1: Experiment 1, mindeye vs second sight\n",
46
+ "# 2: Experiment 2, second sight two way identification\n",
47
+ "# 3: Experiment 3, mental imagery two way identification\n",
48
+ "df_exp = pd.DataFrame(columns=[\"experiment\", \"stim1\", \"stim2\", \"stim3\", \"sample\", \"subject\", \"target_on_left\", \"catch_trial\", \"rep\"])\n",
49
+ "i=0\n",
50
+ "random_count = 0\n",
51
+ "gt_tensor_block = torch.load(\"raw_stimuli/all_images_425.pt\")\n",
52
+ "for subj in [1,2,5,7]: #1,2,5,7\n",
53
+ " subject_enhanced_recons_40 = torch.load(f\"raw_stimuli/final_subj0{subj}_pretrained_40sess_24bs_all_enhancedrecons.pt\")\n",
54
+ " subject_unclip_recons_40 = torch.load(f\"raw_stimuli/final_subj0{subj}_pretrained_40sess_24bs_all_recons.pt\")\n",
55
+ " subject_enhanced_recons_1 = torch.load(f\"raw_stimuli/final_subj0{subj}_pretrained_1sess_24bs_all_enhancedrecons.pt\")\n",
56
+ " subject_braindiffuser_recons_1 = torch.load(f\"raw_stimuli/subj0{subj}_brain_diffuser_750_all_recons.pt\")\n",
57
+ " #Experiment 1, mindeye two way identification\n",
58
+ " random_indices = random.sample(range(1000), 300)\n",
59
+ " for sample in tqdm(random_indices):\n",
60
+ " \n",
61
+ " # Get random sample to compare against\n",
62
+ " random_number = random.choice([x for x in range(1000) if x != sample])\n",
63
+ " # Extract the stimulus images from tensor blocks and save as pngs to stimuli folder\n",
64
+ " gt_sample = transforms.ToPILImage()(gt_tensor_block[sample])\n",
65
+ " sample_enhanced_recons_40 = transforms.ToPILImage()(subject_enhanced_recons_40[sample]).resize((425,425))\n",
66
+ " random_enhanced_recons_40 = transforms.ToPILImage()(subject_enhanced_recons_40[random_number]).resize((425,425))\n",
67
+ " sample_enhanced_recons_40.save(f\"stimuli_v{experiment_version}/{sample}_subject{subj}_mindeye_enhanced_40.png\")\n",
68
+ " random_enhanced_recons_40.save(f\"stimuli_v{experiment_version}/{random_number}_subject{subj}_mindeye_enhanced_40.png\")\n",
69
+ " gt_sample.save(f\"stimuli_v{experiment_version}/{sample}_ground_truth.png\")\n",
70
+ " \n",
71
+ " # Configure stimuli names and order in experiment dataframe\n",
72
+ " sample_names = [f\"{random_number}_subject{subj}_mindeye_enhanced_40\", f\"{sample}_subject{subj}_mindeye_enhanced_40\"]\n",
73
+ " order = random.randrange(2)\n",
74
+ " left_sample = sample_names.pop(order)\n",
75
+ " right_sample = sample_names.pop()\n",
76
+ " gt_sample = f\"{sample}_ground_truth\"\n",
77
+ " df_exp.loc[i] = {\"experiment\" : 1, \"stim1\" : gt_sample, \"stim2\" : left_sample, \"stim3\" : right_sample, \"sample\" : sample, \"subject\" : subj, \n",
78
+ " \"target_on_left\" : order == 1, \"catch_trial\" : None, \"rep\" : 0}\n",
79
+ " i+=1\n",
80
+ " \n",
81
+ " #Experiment 2, refined vs unrefined\n",
82
+ " random_indices = random.sample(range(1000), 300)\n",
83
+ " for sample in tqdm(random_indices):\n",
84
+ " \n",
85
+ " # Extract the stimulus images from tensor blocks and save as pngs to stimuli folder\n",
86
+ " gt_sample = transforms.ToPILImage()(gt_tensor_block[sample])\n",
87
+ " sample_enhanced_recons_40 = transforms.ToPILImage()(subject_enhanced_recons_40[sample]).resize((425,425))\n",
88
+ " sample_unclip_recons_40 = transforms.ToPILImage()(subject_unclip_recons_40[sample]).resize((425,425))\n",
89
+ " sample_enhanced_recons_40.save(f\"stimuli_v{experiment_version}/{sample}_subject{subj}_mindeye_enhanced_40.png\")\n",
90
+ " sample_unclip_recons_40.save(f\"stimuli_v{experiment_version}/{sample}_subject{subj}_mindeye_unclip_40.png\")\n",
91
+ " gt_sample.save(f\"stimuli_v{experiment_version}/{sample}_ground_truth.png\")\n",
92
+ " \n",
93
+ " # Configure stimuli names and order in experiment dataframe\n",
94
+ " sample_names = [f\"{sample}_subject{subj}_mindeye_unclip_40\", f\"{sample}_subject{subj}_mindeye_enhanced_40\"]\n",
95
+ " order = random.randrange(2)\n",
96
+ " left_sample = sample_names.pop(order)\n",
97
+ " right_sample = sample_names.pop()\n",
98
+ " gt_sample = f\"{sample}_ground_truth\"\n",
99
+ " df_exp.loc[i] = {\"experiment\" : 2, \"stim1\" : gt_sample, \"stim2\" : left_sample, \"stim3\" : right_sample, \"sample\" : sample, \"subject\" : subj, \n",
100
+ " \"target_on_left\" : order == 1, \"catch_trial\" : None, \"rep\" : 0}\n",
101
+ " i+=1\n",
102
+ " \n",
103
+ " #Experiment 3, refined 1 session vs brain diffuser 1 session\n",
104
+ " random_indices = random.sample(range(1000), 300)\n",
105
+ " for sample in tqdm(random_indices):\n",
106
+ " \n",
107
+ " # Extract the stimulus images from tensor blocks and save as pngs to stimuli folder\n",
108
+ " gt_sample = transforms.ToPILImage()(gt_tensor_block[sample])\n",
109
+ " sample_enhanced_recons_1 = transforms.ToPILImage()(subject_enhanced_recons_1[sample]).resize((425,425))\n",
110
+ " sample_braindiffuser_1 = transforms.ToPILImage()(subject_braindiffuser_recons_1[sample]).resize((425,425))\n",
111
+ " sample_enhanced_recons_1.save(f\"stimuli_v{experiment_version}/{sample}_subject{subj}_mindeye_enhanced_1.png\")\n",
112
+ " sample_braindiffuser_1.save(f\"stimuli_v{experiment_version}/{sample}_subject{subj}_braindiffuser_1.png\")\n",
113
+ " gt_sample.save(f\"stimuli_v{experiment_version}/{sample}_ground_truth.png\")\n",
114
+ " \n",
115
+ " # Configure stimuli names and order in experiment dataframe\n",
116
+ " sample_names = [f\"{sample}_subject{subj}_braindiffuser_1\", f\"{sample}_subject{subj}_mindeye_enhanced_1\"]\n",
117
+ " order = random.randrange(2)\n",
118
+ " left_sample = sample_names.pop(order)\n",
119
+ " right_sample = sample_names.pop()\n",
120
+ " gt_sample = f\"{sample}_ground_truth\"\n",
121
+ " df_exp.loc[i] = {\"experiment\" : 3, \"stim1\" : gt_sample, \"stim2\" : left_sample, \"stim3\" : right_sample, \"sample\" : sample, \"subject\" : subj, \n",
122
+ " \"target_on_left\" : order == 1, \"catch_trial\" : None, \"rep\" : 0}\n",
123
+ " i+=1\n",
124
+ "df_exp = df_exp.sample(frac=1)\n",
125
+ "print(len(df_exp))\n",
126
+ "print(df_exp)"
127
+ ]
128
+ },
129
+ {
130
+ "cell_type": "code",
131
+ "execution_count": null,
132
+ "metadata": {},
133
+ "outputs": [],
134
+ "source": [
135
+ "# Check if all images are present in final stimuli folder\n",
136
+ "count_not_found = 0\n",
137
+ "stim_path = f\"stimuli_v{experiment_version}/\"\n",
138
+ "for index, row in df_exp.iterrows():\n",
139
+ " if not (os.path.exists(f\"{stim_path}{row['stim1']}.png\")):\n",
140
+ " print(f\"{row['stim1']}.png\")\n",
141
+ " count_not_found += 1\n",
142
+ " if not (os.path.exists(f\"{stim_path}{row['stim2']}.png\")):\n",
143
+ " print(f\"{row['stim2']}.png\")\n",
144
+ " count_not_found += 1\n",
145
+ " if not (os.path.exists(f\"{stim_path}{row['stim3']}.png\")):\n",
146
+ " print(f\"{row['stim3']}.png\")\n",
147
+ " count_not_found += 1\n",
148
+ "print(count_not_found)"
149
+ ]
150
+ },
151
+ {
152
+ "cell_type": "code",
153
+ "execution_count": null,
154
+ "metadata": {},
155
+ "outputs": [],
156
+ "source": [
157
+ "#Add participant ID column\n",
158
+ "pIDs = []\n",
159
+ "for i in range(len(df_exp)):\n",
160
+ " pIDs.append(i // 60)\n",
161
+ "df_exp.insert(0, \"pID\", pIDs)\n",
162
+ "print(len(df_exp[(df_exp['pID'] == 0)]))\n",
163
+ "#Add catch trials within each pID section\n",
164
+ "for pID in range(max(pIDs)):\n",
165
+ " df_pid = df_exp[(df_exp['experiment'] == 1) & (df_exp['pID'] == pID)]\n",
166
+ " \n",
167
+ " # Ground truth catch trials\n",
168
+ " gt_catch_trials = df_pid.sample(n=9)\n",
169
+ " gt_catch_trials['catch_trial'] = \"ground_truth\"\n",
170
+ " for index, row in gt_catch_trials.iterrows():\n",
171
+ " \n",
172
+ " order = random.randrange(2)\n",
173
+ " ground_truth = row['stim1']\n",
174
+ " stims = [row['stim2'], ground_truth]\n",
175
+ " \n",
176
+ " gt_catch_trials.at[index, 'stim2'] = stims.pop(order)\n",
177
+ " gt_catch_trials.at[index, 'stim3'] = stims.pop()\n",
178
+ " # Target on left here means the ground truth repeat is on the left\n",
179
+ " gt_catch_trials.at[index, 'target_on_left'] = (order == 1)\n",
180
+ " \n",
181
+ " # repeated trial catch trials, first sample indices\n",
182
+ " sampled_indices = df_pid.sample(n=9).index\n",
183
+ " #mark the trials at these indices as catch trials\n",
184
+ "df_exp.loc[sampled_indices, 'catch_trial'] = \"repeat\"\n",
185
+ " #create duplicate trials for these samples to repeat\n",
186
+ " repeat_catch_trials_rep1 = df_exp.loc[sampled_indices].copy()\n",
187
+ " repeat_catch_trials_rep2 = df_exp.loc[sampled_indices].copy()\n",
188
+ " repeat_catch_trials_rep1['rep'] = 1\n",
189
+ " repeat_catch_trials_rep2['rep'] = 2\n",
190
+ " \n",
191
+ " \n",
192
+ " df_exp = pd.concat([df_exp, gt_catch_trials, repeat_catch_trials_rep1, repeat_catch_trials_rep2])\n",
193
+ " \n",
194
+ "df_exp = df_exp.sample(frac=1).sort_values(by='pID', kind='mergesort')\n",
195
+ "print(len(df_exp))\n",
196
+ "print(len(df_exp[(df_exp['pID'] == 0)]))"
197
+ ]
198
+ },
199
+ {
200
+ "cell_type": "code",
201
+ "execution_count": null,
202
+ "metadata": {},
203
+ "outputs": [],
204
+ "source": [
205
+ "\n",
206
+ "df_exp.to_csv(f'dataframes_v{experiment_version}/experiment_v{experiment_version}.csv', index=False)\n",
207
+ "\n",
208
+ "df_exp_tsv = df_exp[['pID', 'stim1', 'stim2', 'stim3']].copy()\n",
209
+ "df_exp_tsv.to_csv(f\"dataframes_v{experiment_version}/meadow_trials_v{experiment_version}.tsv\", sep=\"\\t\", index=False, header=False) "
210
+ ]
211
+ },
212
+ {
213
+ "cell_type": "markdown",
214
+ "metadata": {},
215
+ "source": [
216
+ "# THE FOLLOWING CELLS ARE FOR PROCESSING RESPONSES"
217
+ ]
218
+ },
219
+ {
220
+ "cell_type": "code",
221
+ "execution_count": null,
222
+ "metadata": {},
223
+ "outputs": [],
224
+ "source": [
225
+ "response_path = f\"responses_v{experiment_version}/\"\n",
226
+ "dataframe_path = f\"dataframes_v{experiment_version}/\"\n",
227
+ "df_experiment = pd.read_csv(dataframe_path + f\"experiment_v{experiment_version}.csv\")\n",
228
+ "response_version = \"2\"\n",
229
+ "df_responses = pd.read_csv(f\"{response_path}deployment_v{response_version}.csv\")\n",
230
+ "print(df_responses)"
231
+ ]
232
+ },
233
+ {
234
+ "cell_type": "code",
235
+ "execution_count": null,
236
+ "metadata": {},
237
+ "outputs": [],
238
+ "source": [
239
+ "df_responses.head()\n",
240
+ "df_trial = pd.DataFrame(columns=[\"experiment\", \"stim1\", \"stim2\", \"stim3\", \"sample\", \"subject\", \"target_on_left\", \"method\", \"catch_trial\", \"rep\", \"picked_left\", \"participant\"])\n",
241
+ "df_experiment['picked_left'] = None\n",
242
+ "for index, row in tqdm(df_responses.iterrows()):\n",
243
+ " if row['label'] == row['stim2_id']:\n",
244
+ " picked_left = True\n",
245
+ " elif row['label'] == row['stim3_id']:\n",
246
+ " picked_left = False\n",
247
+ " else:\n",
248
+ " print(\"Error\")\n",
249
+ " break\n",
250
+ " start_timestamp = row['time_trial_start']\n",
251
+ " end_timestamp = row['time_trial_response']\n",
252
+ " start = datetime.fromisoformat(start_timestamp.replace(\"Z\", \"+00:00\"))\n",
253
+ " end = datetime.fromisoformat(end_timestamp.replace(\"Z\", \"+00:00\"))\n",
254
+ " # Calculate the difference in seconds\n",
255
+ " time_difference_seconds = (end - start).total_seconds()\n",
256
+ " \n",
257
+ " df_trial.loc[index] = df_experiment[(df_experiment['stim1'] == row['stim1_name']) & (df_experiment['stim2'] == row['stim2_name']) & (df_experiment['stim3'] == row['stim3_name'])].iloc[0]\n",
258
+ " df_trial.loc[index, 'picked_left'] = picked_left\n",
259
+ " df_trial.loc[index, 'participant'] = row['participation']\n",
260
+ " df_trial.loc[index, 'response_time'] = time_difference_seconds\n",
261
+ " \n",
262
+ "df_trial[\"picked_target\"] = df_trial[\"picked_left\"] == df_trial[\"target_on_left\"]\n",
263
+ "print(df_trial)"
264
+ ]
265
+ },
266
+ {
267
+ "cell_type": "code",
268
+ "execution_count": null,
269
+ "metadata": {},
270
+ "outputs": [],
271
+ "source": [
272
+ "# number of participants\n",
273
+ "print(\"Total participants:\", len(df_trial[\"participant\"].unique()))\n",
274
+ "\n",
275
+ "gt_failures = df_trial[(df_trial['catch_trial'] == 'ground_truth') & (df_trial['picked_target'] == False)].groupby('participant').size()\n",
276
+ "# Identify participants who failed more than 1 ground truth catch trial\n",
277
+ "participants_to_remove_rule1 = gt_failures[gt_failures > 1].index.tolist()\n",
278
+ "print(\"Participants to remove 1:\", participants_to_remove_rule1)\n",
279
+ "\n",
280
+ "# Remove participants who failed the repeat catch trial, and gave different responses for identical trials\n",
281
+ "repeat_trials = df_trial[df_trial['rep'] > 0]\n",
282
+ "grouped_repeat_trials = repeat_trials.groupby(['stim1', 'stim2', 'stim3'])\n",
283
+ "participant_failures = {}\n",
284
+ "# Iterate through groups to check consistency in \"picked_target\" across repetitions\n",
285
+ "for _, group in grouped_repeat_trials:\n",
286
+ " if group['picked_target'].nunique() != 1: # Inconsistent \"picked_target\" within the group\n",
287
+ " for participant in group['participant'].unique(): \n",
288
+ " participant_failures[participant] = participant_failures.get(participant, 0) + 1\n",
289
+ "\n",
290
+ "# Identify participants who failed more than one set of trial repetitions\n",
291
+ "participants_to_remove_rule2 = [participant for participant, failures in participant_failures.items() if failures > 1]\n",
292
+ "print(\"Participants to remove 2:\", participants_to_remove_rule2)\n",
293
+ "\n",
294
+ "participants_to_remove = set(participants_to_remove_rule1).union(set(participants_to_remove_rule2))\n",
295
+ "filtered_df = df_trial[~df_trial['participant'].isin(participants_to_remove)]\n",
296
+ "print(\"Clean participants:\", len(filtered_df[\"participant\"].unique()))\n",
297
+ "print(len(df_trial), len(filtered_df))\n",
298
+ "print(participants_to_remove)\n",
299
+ "filtered_df.to_csv(f'{dataframe_path}filtered_responses_v{response_version}.csv', index=False)"
300
+ ]
301
+ },
302
+ {
303
+ "cell_type": "code",
304
+ "execution_count": null,
305
+ "metadata": {},
306
+ "outputs": [],
307
+ "source": [
308
+ "# Load filtered responses\n",
309
+ "filtered_df = pd.read_csv(f'{dataframe_path}filtered_responses_v{response_version}.csv')\n",
310
+ "# Filter out catch trials\n",
311
+ "df_trial_exp = filtered_df[(filtered_df['catch_trial'].isnull() & (filtered_df['rep'] == 0))]\n",
312
+ "\n",
313
+ "# Grab results from an individual experiment and print them out\n",
314
+ "df_trial_exp1 = df_trial_exp[df_trial_exp['experiment'] == 3]\n",
315
+ "\n",
316
+ "# Perform a binomial test\n",
317
+ "# The null hypothesis is that the probability of success is 0.5 (chance level)\n",
318
+ "p_value = binom_test(df_trial_exp1['picked_target'].sum(), n=len(df_trial_exp1['picked_target']), p=0.5, alternative='two-sided')\n",
319
+ "\n",
320
+ "print(\"Number of experiment trials:\", len(df_trial_exp1))\n",
321
+ "print(\"Success rate: \", len(df_trial_exp1[df_trial_exp1[\"picked_target\"]]) / len(df_trial_exp1))\n",
322
+ "print(f'P-value: {p_value}')"
323
+ ]
324
+ },
325
+ {
326
+ "cell_type": "code",
327
+ "execution_count": null,
328
+ "metadata": {},
329
+ "outputs": [],
330
+ "source": [
331
+ "import shutil\n",
332
+ "from PIL import Image, ImageDraw, ImageFont\n",
333
+ "\n",
334
+ "# Filter for experiment 3 rows where picked_target is false\n",
335
+ "df_exp3_failures = df_trial_exp[df_trial_exp['experiment'] == 3]\n",
336
+ "df_exp3_failures = df_exp3_failures[df_exp3_failures['picked_target'] == False]\n",
337
+ "\n",
338
+ "# Create the \"brain_diffuser_failures\" folder if it doesn't exist\n",
339
+ "os.makedirs(\"brain_diffuser_failures_tiled\", exist_ok=True)\n",
340
+ "\n",
341
+ "# Copy the stimuli from stimuli_v4 to the \"brain_diffuser_failures\" folder\n",
342
+ "# Set the dimensions for the concatenated image\n",
343
+ "width = 3 * 425\n",
344
+ "height = 450\n",
345
+ "\n",
346
+ "# Create a blank canvas for the concatenated image\n",
347
+ "concatenated_image = Image.new('RGB', (width, height), (255, 255, 255))\n",
348
+ "draw = ImageDraw.Draw(concatenated_image)\n",
349
+ "\n",
350
+ "# Set the font properties for the title captions\n",
351
+ "font = ImageFont.truetype(\"arial.ttf\", 16)\n",
352
+ "\n",
353
+ "# Iterate over the rows in df_exp3_failures\n",
354
+ "for index, row in df_exp3_failures.iterrows():\n",
355
+ " # Get the paths for the stimuli images\n",
356
+ " stim1_path = f\"stimuli_v4/{row['stim1']}.png\"\n",
357
+ " stim2_path = f\"stimuli_v4/{row['stim2']}.png\"\n",
358
+ " stim3_path = f\"stimuli_v4/{row['stim3']}.png\"\n",
359
+ " \n",
360
+ " # Open the stimuli images\n",
361
+ " stim1_image = Image.open(stim1_path)\n",
362
+ " stim2_image = Image.open(stim2_path)\n",
363
+ " stim3_image = Image.open(stim3_path)\n",
364
+ " \n",
365
+ " # Resize the stimuli images to match the desired dimensions\n",
366
+ " stim1_image = stim1_image.resize((425, 425))\n",
367
+ " stim2_image = stim2_image.resize((425, 425))\n",
368
+ " stim3_image = stim3_image.resize((425, 425))\n",
369
+ " \n",
370
+ " # Calculate the positions for the stimuli images\n",
371
+ " x1 = 0\n",
372
+ " x2 = 425\n",
373
+ " x3 = 2 * 425\n",
374
+ " y = 0\n",
375
+ " \n",
376
+ " # Paste the stimuli images onto the concatenated image\n",
377
+ " concatenated_image.paste(stim1_image, (x1, y))\n",
378
+ " concatenated_image.paste(stim2_image, (x2, y))\n",
379
+ " concatenated_image.paste(stim3_image, (x3, y))\n",
380
+ " \n",
381
+ " # Add the title captions for each image\n",
382
+ " draw.text((x1, y + 425), f\"Stim1 (GT): {row['stim1']}\", font=font, fill=(0, 0, 0))\n",
383
+ " draw.text((x2, y + 425), f\"Stim2: {row['stim2']}\", font=font, fill=(0, 0, 0))\n",
384
+ " draw.text((x3, y + 425), f\"Stim3: {row['stim3']}\", font=font, fill=(0, 0, 0))\n",
385
+ "\n",
386
+ " # Save the concatenated image\n",
387
+ " concatenated_image.save(f\"brain_diffuser_failures_tiled/{index}.png\")\n"
388
+ ]
389
+ }
390
+ ],
391
+ "metadata": {
392
+ "kernelspec": {
393
+ "display_name": "SS",
394
+ "language": "python",
395
+ "name": "python3"
396
+ },
397
+ "language_info": {
398
+ "codemirror_mode": {
399
+ "name": "ipython",
400
+ "version": 3
401
+ },
402
+ "file_extension": ".py",
403
+ "mimetype": "text/x-python",
404
+ "name": "python",
405
+ "nbconvert_exporter": "python",
406
+ "pygments_lexer": "ipython3",
407
+ "version": "3.10.12"
408
+ }
409
+ },
410
+ "nbformat": 4,
411
+ "nbformat_minor": 2
412
+ }