Evanjaa committed
Commit b46c0af
1 Parent(s): 409776e

Upload 2 files

Files changed (2)
  1. TempTAC.ipynb +1219 -0
  2. spatial-approach.ipynb +1692 -0
TempTAC.ipynb ADDED
@@ -0,0 +1,1219 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "id": "7033b456-1f53-4201-bb2d-64a02e01ffa8",
6
+ "metadata": {},
7
+ "source": [
8
+ "## Imports"
9
+ ]
10
+ },
11
+ {
12
+ "cell_type": "code",
13
+ "execution_count": 1,
14
+ "id": "33731b76-6b28-4fd4-b193-036a317f3f28",
15
+ "metadata": {},
16
+ "outputs": [],
17
+ "source": [
18
+ "from sklearn.metrics import precision_recall_fscore_support, confusion_matrix, accuracy_score\n",
19
+ "from sklearn.utils.class_weight import compute_class_weight\n",
20
+ "\n",
21
+ "from torch.utils.data import TensorDataset, DataLoader\n",
22
+ "from sklearn.model_selection import train_test_split\n",
23
+ "from sklearn.preprocessing import MinMaxScaler\n",
24
+ "from collections import Counter\n",
25
+ "from tqdm.notebook import tqdm\n",
26
+ "\n",
27
+ "import torch.nn.functional as F\n",
28
+ "import matplotlib.pyplot as plt\n",
29
+ "import torch.optim as optim\n",
30
+ "import torch.nn as nn\n",
31
+ "import seaborn as sns\n",
32
+ "import pandas as pd\n",
33
+ "import numpy as np\n",
34
+ "import warnings\n",
35
+ "import imblearn\n",
36
+ "import optuna\n",
37
+ "import torch\n",
38
+ "import copy\n",
39
+ "import json\n",
40
+ "import os\n",
41
+ "\n",
42
+ "warnings.filterwarnings(\"ignore\", category=DeprecationWarning) \n",
43
+ "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')"
44
+ ]
45
+ },
46
+ {
47
+ "cell_type": "markdown",
48
+ "id": "5ade242d-aabb-4b16-b10a-0572f4198a95",
49
+ "metadata": {},
50
+ "source": [
51
+ "## Load CLS-tokens and map 'incomplete-classes' to their respective full classes\n"
52
+ ]
53
+ },
54
+ {
55
+ "cell_type": "code",
56
+ "execution_count": 2,
57
+ "id": "b994e92d-effd-4575-b1db-27154b903ad2",
58
+ "metadata": {},
59
+ "outputs": [],
60
+ "source": [
61
+ "X = torch.load('/home/evan/D1/project/code/raw_cls_tokens_features.pt', map_location=device) # one feature file must be loaded; alternatives kept below\n",
62
+ "#X = torch.load('/home/evan/D1/project/code/stretched_cls_tokens.pt', map_location=device)\n",
63
+ "#X = torch.load('/home/evan/D1/project/code/reflected_cls_tokens.pt', map_location=device)\n",
64
+ "\n",
65
+ "y = np.load('/home/evan/D1/project/code/cls_tokens_labels.npy')\n",
66
+ "frame_counts = np.load('/home/evan/D1/project/code/frame_counts.npy')\n",
67
+ "\n",
68
+ "\n",
69
+ "class_mapping = {0:0, 1: 1, 2: 2, 3: 1, 4: 2}\n",
70
+ "\n",
71
+ "for i, label in enumerate(y):\n",
72
+ " y[i] = class_mapping[label]\n",
73
+ "print('Done')"
74
+ ]
75
+ },
76
+ {
77
+ "cell_type": "markdown",
78
+ "id": "f1d19317-da86-4539-bafb-1d0b35f6e46a",
79
+ "metadata": {},
80
+ "source": [
81
+ "## Helper functions to split sequence, extract and (add context frames - inactive)"
82
+ ]
83
+ },
84
+ {
85
+ "cell_type": "code",
86
+ "execution_count": 3,
87
+ "id": "14c04222-3270-4adf-95b0-d92be6f771ed",
88
+ "metadata": {},
89
+ "outputs": [],
90
+ "source": [
91
+ "def split_sequences_np(arr):\n",
92
+ " \"\"\"Split an index array into runs of consecutive indices\"\"\"\n",
93
+ " \n",
94
+ " diffs = np.diff(arr)\n",
95
+ " # Identify where the difference is not 1 (i.e., breaks in consecutive sequences)\n",
96
+ " breaks = np.where(diffs != 1)[0] + 1\n",
97
+ " \n",
98
+ " # Use numpy split to divide the array at every break point\n",
99
+ " return np.split(arr, breaks)\n",
100
+ "\n",
101
+ "def extract_data(games, boundaries, X, y, indices):\n",
102
+ " X_split, y_split, idx_split = [], [], []\n",
103
+ " for g in games:\n",
104
+ " start = 0 if g == 0 else boundaries[g-1]\n",
105
+ " end = boundaries[g]\n",
106
+ " X_split.append(X[start:end])\n",
107
+ " y_split.append(y[start:end])\n",
108
+ " idx_split.extend(indices[start:end])\n",
109
+ " return torch.cat(X_split), torch.cat(y_split), idx_split\n",
110
+ "\n",
111
+ "def add_context_frames(seq, total_frames, context_size=0, last_context_end=0):\n",
112
+ " \"\"\"Adds context frames to the sequence, ensuring no overlap with other sequences or video boundaries\"\"\"\n",
113
+ " seq_start_idx, seq_end_idx = seq[0], seq[-1]\n",
114
+ " \n",
115
+ " # Calculate start and end of context\n",
116
+ " start_with_context = max(seq_start_idx - context_size, last_context_end + 1, 0)\n",
117
+ " end_with_context = min(seq_end_idx + context_size, total_frames - 1)\n",
118
+ " \n",
119
+ " return start_with_context, end_with_context\n"
120
+ ]
121
+ },
122
+ {
123
+ "cell_type": "markdown",
124
+ "id": "7e946392-df5b-4773-9808-55cc58c57840",
125
+ "metadata": {},
126
+ "source": [
127
+ "## Undersample tackle-sequences"
128
+ ]
129
+ },
130
+ {
131
+ "cell_type": "code",
132
+ "execution_count": null,
133
+ "id": "b2f72e9f-9755-4f59-bf06-242d3e0696fc",
134
+ "metadata": {},
135
+ "outputs": [],
136
+ "source": [
137
+ "import numpy as np\n",
138
+ "import torch\n",
139
+ "\n",
140
+ "def extract_tackle_sequences_and_undersample(X, y, frame_counts, device='cuda', max_tackles=500):\n",
141
+ " new_X, new_y, kept_indices = [], [], []\n",
142
+ " all_seq = []\n",
143
+ " class_lengths = {} # Stores sequence lengths and counts by class\n",
144
+ " total_sequences = 0 # Total count of all sequences\n",
145
+ " tackle_count = 0 # Counter for class 0 sequences\n",
146
+ " tackle_count2 = 0 # Counter for class 2 sequences\n",
147
+ "\n",
148
+ " start_idx = 0\n",
149
+ " last_context_end = -1\n",
150
+ "\n",
151
+ " for z, count in enumerate(frame_counts):\n",
152
+ " end_idx = start_idx + count\n",
153
+ "\n",
154
+ " key_frame_indices = np.where(y[start_idx:end_idx] != 0)[0] + start_idx\n",
155
+ " seq_splitted = split_sequences_np(key_frame_indices)\n",
156
+ "\n",
157
+ " background_frame_indices = np.where(y[start_idx:end_idx] == 0)[0] + start_idx\n",
158
+ " bg_seq_splitted = split_sequences_np(background_frame_indices)\n",
159
+ "\n",
160
+ " for bg_seq in bg_seq_splitted:\n",
161
+ " if len(bg_seq) >= 70:\n",
162
+ " if y[bg_seq[0]] == 0: # Check if the sequence is of class 0\n",
163
+ " if tackle_count >= max_tackles:\n",
164
+ " continue # Skip if maximum number of tackles has been reached\n",
165
+ " else:\n",
166
+ " tackle_count += 1\n",
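+ " # Sample a random 25-frame clip away from the edges of the background run\n",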
167
+ " start_random = np.random.randint(35, len(bg_seq) - 34)\n",
168
+ " random_seq = bg_seq[start_random:start_random + 25]\n",
169
+ " all_seq.append(random_seq)\n",
170
+ " total_sequences += 1\n",
171
+ "\n",
172
+ " class_id = y[random_seq[0]]\n",
173
+ " if class_id not in class_lengths:\n",
174
+ " class_lengths[class_id] = {'lengths': [], 'count': 0}\n",
175
+ " class_lengths[class_id]['lengths'].append(len(random_seq))\n",
176
+ " class_lengths[class_id]['count'] += 1\n",
177
+ "\n",
178
+ " new_bg_seq_x = X[random_seq[0]:random_seq[-1] + 1]\n",
179
+ " new_bg_seq_y = y[random_seq[0]:random_seq[-1] + 1]\n",
180
+ "\n",
181
+ " new_X.extend(new_bg_seq_x)\n",
182
+ " new_y.extend(new_bg_seq_y)\n",
183
+ "\n",
184
+ " for seq in seq_splitted:\n",
185
+ " if seq.size > 0:\n",
186
+ " if y[seq[0]] == 2: # Check if the sequence is of class 2 (tackle-replay)\n",
187
+ " if tackle_count2 >= 280:\n",
188
+ " continue # Skip if maximum number of tackles has been reached\n",
189
+ " else:\n",
190
+ " tackle_count2 += 1\n",
191
+ "\n",
192
+ " all_seq.append(seq)\n",
193
+ " total_sequences += 1\n",
194
+ "\n",
195
+ " class_id = y[seq[0]]\n",
196
+ " if class_id not in class_lengths:\n",
197
+ " class_lengths[class_id] = {'lengths': [], 'count': 0}\n",
198
+ " class_lengths[class_id]['lengths'].append(len(seq))\n",
199
+ " class_lengths[class_id]['count'] += 1\n",
200
+ "\n",
201
+ " \n",
202
+ " # With context_size=0 this returns the sequence's own start and end; background context is instead sampled randomly in the loop above.\n",
203
+ " context_start, context_end = add_context_frames(seq, end_idx, last_context_end=last_context_end)\n",
204
+ " \n",
205
+ " new_X.extend(X[context_start:context_end + 1])\n",
206
+ " new_y.extend(y[context_start:context_end + 1])\n",
207
+ "\n",
208
+ " last_context_end = context_end\n",
209
+ "\n",
210
+ " start_idx = end_idx\n",
211
+ "\n",
212
+ " new_X = torch.stack(new_X)\n",
213
+ " new_X = new_X.to(dtype=torch.float32, device=device) # already a tensor; .to() avoids the torch.tensor(tensor) copy warning\n",
214
+ " new_y = torch.tensor(new_y, dtype=torch.long, device=device)\n",
215
+ "\n",
216
+ " return new_X, new_y, kept_indices, all_seq, class_lengths, total_sequences\n",
217
+ "\n",
218
+ "new_X, new_y, kept_indices, all_seq, class_lengths, total_sequences = extract_tackle_sequences_and_undersample(X, y, frame_counts)\n",
219
+ "\n",
220
+ "# Compute and print average lengths and counts for each class\n",
221
+ "for class_id, info in class_lengths.items():\n",
222
+ " average_length = np.mean(info['lengths'])\n",
223
+ " print(f'Class {class_id} - Average Sequence Length: {average_length:.2f}, Count: {info[\"count\"]}')\n",
224
+ "\n",
225
+ "print(f'Total sequences processed: {total_sequences}')\n",
226
+ "print(new_X.shape)\n",
227
+ "print(new_y.shape)\n",
228
+ "print(np.unique(new_y.cpu(), return_counts=True))\n"
229
+ ]
230
+ },
231
+ {
232
+ "cell_type": "markdown",
233
+ "id": "efe164b4-4541-4c9e-8ec4-af08d33062db",
234
+ "metadata": {},
235
+ "source": [
236
+ "## Split games into train, val, and test to ensure no data leakage"
237
+ ]
238
+ },
239
+ {
240
+ "cell_type": "code",
241
+ "execution_count": 7,
242
+ "id": "0fd784d3-b7d5-41d7-bc18-9429fa07f2f4",
243
+ "metadata": {},
244
+ "outputs": [],
245
+ "source": [
246
+ "import numpy as np\n",
247
+ "import torch\n",
248
+ "\n",
249
+ "def split_data_by_game(X, y, kept_indices, frame_counts, split_ratio=(0.7, 0.15, 0.15), seed=None):\n",
250
+ " # Set the random seed for reproducibility\n",
251
+ " np.random.seed(seed)\n",
252
+ " \n",
253
+ " # Calculate the cumulative sum of frame counts to determine game boundaries\n",
254
+ " boundaries = np.cumsum(frame_counts)\n",
255
+ " \n",
256
+ " # Create lists to hold data for train, validation, and test sets\n",
257
+ " X_train, y_train, X_val, y_val, X_test, y_test = [], [], [], [], [], []\n",
258
+ " idx_train, idx_val, idx_test = [], [], []\n",
259
+ " \n",
260
+ " # Shuffle the indices to randomize which games go into which set\n",
261
+ " game_indices = np.arange(len(frame_counts))\n",
262
+ " np.random.shuffle(game_indices)\n",
263
+ " \n",
264
+ " total_games = len(game_indices)\n",
265
+ " num_train = int(total_games * split_ratio[0])\n",
266
+ " num_val = int(total_games * split_ratio[1])\n",
267
+ " \n",
268
+ " print('Number of total games: ', total_games)\n",
269
+ " print('Number of train games: ', num_train)\n",
270
+ " print('Number of val games: ', num_val)\n",
271
+ " \n",
272
+ " \n",
273
+ " # Assign games to train, validation, and test sets\n",
274
+ " train_games = game_indices[:num_train]\n",
275
+ " val_games = game_indices[num_train:num_train + num_val]\n",
276
+ " test_games = game_indices[num_train + num_val:]\n",
277
+ "\n",
278
+ "\n",
279
+ " # Extract data for each split\n",
280
+ " X_train, y_train, idx_train = extract_data(train_games, boundaries, X, y, kept_indices)\n",
281
+ " X_val, y_val, idx_val = extract_data(val_games, boundaries, X, y, kept_indices)\n",
282
+ " X_test, y_test, idx_test = extract_data(test_games, boundaries, X, y, kept_indices)\n",
283
+ " \n",
284
+ " return (X_train, y_train, idx_train), (X_val, y_val, idx_val), (X_test, y_test, idx_test)\n",
285
+ "\n",
286
+ "# Set the random seed for reproducibility\n",
287
+ "seed = 42\n",
288
+ "\n",
289
+ "split_ratio = (0.7, 0.15, 0.15)\n",
290
+ "(X_train, y_train, idx_train), (X_val, y_val, idx_val), (X_test, y_test, idx_test) = split_data_by_game(new_X, new_y, kept_indices, frame_counts, split_ratio, seed)\n",
291
+ "\n",
292
+ "print(np.unique(y_train.cpu(), return_counts=True)[1])\n",
293
+ "print(np.unique(y_val.cpu(), return_counts=True)[1])\n",
294
+ "print(np.unique(y_test.cpu(), return_counts=True)[1])\n"
295
+ ]
296
+ },
297
+ {
298
+ "cell_type": "markdown",
299
+ "id": "894031df-d48f-4f99-ab80-1ad37cd021e3",
300
+ "metadata": {},
301
+ "source": [
302
+ "## Create tensors"
303
+ ]
304
+ },
305
+ {
306
+ "cell_type": "code",
307
+ "execution_count": 8,
308
+ "id": "40f2921a-5ea6-40fc-898b-58816a1887ab",
309
+ "metadata": {},
310
+ "outputs": [],
311
+ "source": [
312
+ "X_train = X_train.to(dtype=torch.float32, device=device) # splits are already tensors; .to() avoids the torch.tensor(tensor) copy warning\n",
313
+ "y_train = y_train.to(dtype=torch.long, device=device)\n",
314
+ "\n",
315
+ "X_val = X_val.to(dtype=torch.float32, device=device)\n",
316
+ "y_val = y_val.to(dtype=torch.long, device=device)\n",
317
+ "\n",
318
+ "X_test = X_test.to(dtype=torch.float32, device=device)\n",
319
+ "y_test = y_test.to(dtype=torch.long, device=device)"
320
+ ]
321
+ },
322
+ {
323
+ "cell_type": "markdown",
324
+ "id": "69eb8af6-0455-4f01-b7ca-19faf2027041",
325
+ "metadata": {},
326
+ "source": [
327
+ "## Shifting Window - Dataset + DataLoaders "
328
+ ]
329
+ },
330
+ {
331
+ "cell_type": "code",
332
+ "execution_count": 143,
333
+ "id": "e0f71c66-506d-428d-a4e5-008f3eb04fa4",
334
+ "metadata": {},
335
+ "outputs": [],
336
+ "source": [
337
+ "class FrameSequenceDataset(TensorDataset):\n",
338
+ " def __init__(self, X, y, window_size):\n",
339
+ " self.X = X\n",
340
+ " self.y = y\n",
341
+ " self.window_size = window_size\n",
342
+ " \n",
343
+ " def __len__(self):\n",
344
+ " # Calculate how many complete non-overlapping windows fit into the dataset\n",
345
+ " return (len(self.y) // self.window_size)\n",
346
+ " \n",
347
+ " def __getitem__(self, index):\n",
348
+ " # Calculate the start and end of the sequence based on the window size and index\n",
349
+ " start = index * self.window_size\n",
350
+ " end = start + self.window_size\n",
351
+ " X_seq = self.X[start:end]\n",
352
+ " \n",
353
+ " y_seq = self.y[start:end]\n",
354
+ " return X_seq, y_seq\n",
355
+ "\n",
356
+ "window_size = 75 # 3 seconds of video at 25 fps\n",
357
+ "train_dataset = FrameSequenceDataset(X_train, y_train, window_size)\n",
358
+ "val_dataset = FrameSequenceDataset(X_val, y_val, window_size)\n",
359
+ "test_dataset = FrameSequenceDataset(X_test, y_test, window_size)\n",
360
+ "\n",
361
+ "\n",
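+ "# shuffle=False keeps windows in temporal order, since the LSTM hidden state is carried across batches during training\n",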
362
+ "train_loader = DataLoader(train_dataset, batch_size=256, shuffle=False)\n",
363
+ "val_loader = DataLoader(val_dataset, batch_size=256, shuffle=False)\n",
364
+ "test_loader = DataLoader(test_dataset, batch_size=256, shuffle=False)"
365
+ ]
366
+ },
367
+ {
368
+ "cell_type": "markdown",
369
+ "id": "ac9b8966-22a8-45d1-8991-170ea1311b1d",
370
+ "metadata": {},
371
+ "source": [
372
+ "## Sliding Window - Dataset + DataLoaders "
373
+ ]
374
+ },
375
+ {
376
+ "cell_type": "code",
377
+ "execution_count": 152,
378
+ "id": "07938ef3-2bac-4ac3-805b-f01e0920540e",
379
+ "metadata": {},
380
+ "outputs": [],
381
+ "source": [
382
+ "class FrameSequenceDataset(TensorDataset):\n",
383
+ " def __init__(self, X, y, window_size):\n",
384
+ " self.X = X\n",
385
+ " self.y = y\n",
386
+ " self.window_size = window_size\n",
387
+ " \n",
388
+ " def __len__(self):\n",
389
+ " # Adjust the length to allow for complete windows only\n",
390
+ " return len(self.y) - self.window_size + 1\n",
391
+ " \n",
392
+ " def __getitem__(self, index):\n",
393
+ " # Extract a window of data starting at `index`\n",
394
+ " X_seq = self.X[index:index + self.window_size]\n",
395
+ " y_seq = self.y[index:index + self.window_size]\n",
396
+ " return X_seq, y_seq\n",
397
+ "\n",
398
+ "\n",
399
+ "window_size = 25 # 1 second of video at 25 fps\n",
400
+ "train_dataset = FrameSequenceDataset(X_train, y_train, window_size)\n",
401
+ "val_dataset = FrameSequenceDataset(X_val, y_val, window_size)\n",
402
+ "test_dataset = FrameSequenceDataset(X_test, y_test, window_size)\n",
403
+ "\n",
404
+ "\n",
405
+ "train_loader = DataLoader(train_dataset, batch_size=128, shuffle=False)\n",
406
+ "val_loader = DataLoader(val_dataset, batch_size=128, shuffle=False)\n",
407
+ "test_loader = DataLoader(test_dataset, batch_size=128, shuffle=False)"
408
+ ]
409
+ },
410
+ {
411
+ "cell_type": "markdown",
412
+ "id": "41cfd268-5fc0-40bd-84c6-6a2ce89abdf4",
413
+ "metadata": {
414
+ "tags": []
415
+ },
416
+ "source": [
417
+ "## TempTAC and Attention-module"
418
+ ]
419
+ },
420
+ {
421
+ "cell_type": "code",
422
+ "execution_count": 35,
423
+ "id": "2005f77b-9722-41bf-ac86-ceaef2912bde",
424
+ "metadata": {},
425
+ "outputs": [],
426
+ "source": [
427
+ "class AttentionModule(nn.Module):\n",
428
+ " def __init__(self, input_size, heads):\n",
429
+ " super().__init__()\n",
430
+ " self.self_attention = nn.MultiheadAttention(embed_dim=input_size, num_heads=heads)\n",
431
+ " \n",
432
+ " def forward(self, x):\n",
433
+ " # x: [batch_size, seq_len, features]\n",
434
+ " x = x.permute(1, 0, 2) # MultiheadAttention (batch_first=False) expects [seq_len, batch_size, features]\n",
435
+ " attn_output, _ = self.self_attention(x, x, x)\n",
436
+ " return attn_output.permute(1, 0, 2) # Return same shape\n",
437
+ "\n",
438
+ "\n",
439
+ "\n",
440
+ "class TempTAC(nn.Module):\n",
441
+ " def __init__(self, input_size, hidden_size, output_dim, num_layers, device, dropout_prob=0.5):\n",
442
+ " super().__init__()\n",
443
+ " self.lstm = nn.LSTM(input_size=input_size + output_dim, hidden_size=hidden_size, num_layers=num_layers, batch_first=True, dropout=dropout_prob, bidirectional=True)\n",
444
+ " self.linear = nn.Linear(hidden_size * 2, output_dim)\n",
445
+ " self.dropout = nn.Dropout(dropout_prob)\n",
446
+ " self.device = device\n",
447
+ " self.attention = AttentionModule(input_size, heads=8) # Self-attention applied to the inputs before the LSTM (see forward)\n",
448
+ " self.prev_output_weight = nn.Parameter(torch.tensor(1.0)) # Initialize learnable weight for previous output influence\n",
449
+ " self.hidden_size = hidden_size\n",
450
+ " self.num_layers = num_layers\n",
451
+ " self.output_dim = output_dim\n",
452
+ " \n",
453
+ " \n",
454
+ " def init_hidden(self, batch_size):\n",
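+ " # Hidden/cell states have 2 * num_layers layers because the LSTM is bidirectional\n",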
455
+ " return (torch.zeros(self.num_layers * 2, batch_size, self.hidden_size).to(self.device),\n",
456
+ " torch.zeros(self.num_layers * 2, batch_size, self.hidden_size).to(self.device))\n",
457
+ "\n",
458
+ " def forward(self, x, hidden, current_batch_size):\n",
459
+ " batch_size, seq_len, _ = x.size()\n",
460
+ " prev_output = torch.zeros(batch_size, 1, self.output_dim, device=self.device)\n",
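+ " # prev_output feeds each step's logits back into the next LSTM input, hence input_size + output_dim in the LSTM constructor\n",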
461
+ " \n",
462
+ " # Apply self-attention over the whole input sequence, once, before the recurrent loop\n",
463
+ " x = self.attention(x)\n",
464
+ " \n",
465
+ " outputs = []\n",
466
+ " for t in range(seq_len):\n",
467
+ " weighted_prev_output = prev_output * self.prev_output_weight # Apply learned weight to previous output\n",
468
+ " \n",
469
+ " lstm_input = torch.cat((x[:, t:t+1, :], weighted_prev_output), dim=-1)\n",
470
+ " \n",
471
+ " lstm_out, hidden = self.lstm(lstm_input, hidden)\n",
472
+ " lstm_out = self.dropout(lstm_out)\n",
473
+ " \n",
474
+ " prev_output = self.linear(lstm_out) # Project to class logits, shape [batch, 1, output_dim]\n",
475
+ " outputs.append(prev_output)\n",
476
+ "\n",
477
+ " return torch.cat(outputs, dim=1), hidden"
478
+ ]
479
+ },
480
+ {
481
+ "cell_type": "markdown",
482
+ "id": "f08e28c7-2082-42e6-97dc-9144f44f5227",
483
+ "metadata": {
484
+ "tags": []
485
+ },
486
+ "source": [
487
+ "### Models parameter counter"
488
+ ]
489
+ },
490
+ {
491
+ "cell_type": "code",
492
+ "execution_count": 317,
493
+ "id": "0616329c-5aaf-4681-a820-ea078f80042a",
494
+ "metadata": {},
495
+ "outputs": [],
496
+ "source": [
497
+ "def count_parameters(model):\n",
498
+ " return sum(p.numel() for p in model.parameters() if p.requires_grad)\n",
499
+ "\n",
500
+ "#model = EnhancedMultiLayerClassifier(1024, 3)\n",
501
+ "print(\"Number of trainable parameters:\", count_parameters(model))\n",
502
+ "print(\"Number of training instances:\", sum(np.unique(y_train.cpu(), return_counts=True)[1]))"
503
+ ]
504
+ },
505
+ {
506
+ "cell_type": "markdown",
507
+ "id": "cd58d059-8f30-4562-aac3-82762f4afbe7",
508
+ "metadata": {
509
+ "tags": []
510
+ },
511
+ "source": [
512
+ "## Training Loop"
513
+ ]
514
+ },
515
+ {
516
+ "cell_type": "code",
517
+ "execution_count": 133,
518
+ "id": "aba45731-cd21-41bc-8812-a3c7f30f1b06",
519
+ "metadata": {},
520
+ "outputs": [],
521
+ "source": [
522
+ "import torch\n",
523
+ "import torch.nn as nn\n",
524
+ "from sklearn.metrics import classification_report\n",
525
+ "from torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau\n",
526
+ "\n",
527
+ "\n",
528
+ "config = {'l1_lambda': 3.326004484093452e-05,\n",
529
+ "'lr': 0.0004256783934105,\n",
530
+ "'weight_decay': 1.4334181994254526e-05}\n",
531
+ "\n",
532
+ "\n",
533
+ "def train_model(model, train_loader, val_loader, criterion, optimizer, num_epochs=10):\n",
534
+ " device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
535
+ " model.to(device)\n",
536
+ " \n",
537
+ " training_losses = []\n",
538
+ " validation_losses = []\n",
539
+ " \n",
540
+ " best_val = float('inf')\n",
541
+ " break_margin = 2\n",
542
+ " time_to_break = 0 # early-stopping patience counter, initialized before the loop\n",
543
+ " best_f1 = 0.0\n",
544
+ "\n",
545
+ " l1_lambda = config['l1_lambda']\n",
546
+ "\n",
547
+ " batch_size = 256\n",
548
+ " current_lr = optimizer.param_groups[0]['lr']\n",
549
+ " \n",
550
+ " \n",
551
+ " for epoch in range(num_epochs):\n",
552
+ " model.train()\n",
553
+ " total_train_loss = 0\n",
554
+ " \n",
555
+ " # Initialize hidden state for the first batch size\n",
556
+ " initial_batch = next(iter(train_loader))\n",
557
+ " \n",
558
+ " initial_batch_size = initial_batch[0].size(0)\n",
559
+ " \n",
560
+ " hidden = model.init_hidden(initial_batch_size)\n",
561
+ " clip_value = 1\n",
562
+ " for inputs, labels in train_loader:\n",
563
+ " inputs, labels = inputs.to(device), labels.to(device)\n",
564
+ " current_batch_size = inputs.size(0)\n",
565
+ " \n",
566
+ " # Adjust hidden state size if current batch size differs from the initial batch size\n",
567
+ " if current_batch_size != initial_batch_size:\n",
568
+ " adjusted_hidden = (hidden[0][:, :current_batch_size, :].contiguous(),\n",
569
+ " hidden[1][:, :current_batch_size, :].contiguous())\n",
570
+ "\n",
571
+ " else:\n",
572
+ " adjusted_hidden = hidden\n",
573
+ " \n",
574
+ " outputs, adjusted_hidden = model(inputs, adjusted_hidden, current_batch_size)\n",
575
+ " \n",
576
+ " hidden = tuple([h.data for h in adjusted_hidden]) # Detach hidden state from graph to prevent backprop through entire dataset\n",
577
+ " \n",
578
+ " optimizer.zero_grad()\n",
579
+ " \n",
580
+ " \n",
581
+ " # Calculate loss for the entire sequence at once\n",
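+ " # CrossEntropyLoss expects logits as [batch, num_classes, seq_len], hence the transpose\n",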
582
+ " loss = criterion(outputs.transpose(1, 2), labels)\n",
583
+ " \n",
584
+ " #l1_norm = sum(torch.linalg.norm(p, 1) for p in model.parameters())\n",
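+ " # L1 penalty summed over all parameters, scaled by the tuned l1_lambda\n",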
585
+ " l1_norm = sum(p.abs().sum() for p in model.parameters())\n",
586
+ "\n",
587
+ " loss = loss + l1_lambda * l1_norm\n",
588
+ " \n",
589
+ " loss.backward()\n",
590
+ " torch.nn.utils.clip_grad_norm_(model.parameters(), clip_value)\n",
591
+ " optimizer.step()\n",
592
+ " total_train_loss += loss.item()\n",
593
+ "\n",
594
+ " average_train_loss = total_train_loss / len(train_loader)\n",
595
+ " training_losses.append(average_train_loss) \n",
596
+ " \n",
597
+ " \n",
598
+ " model.eval()\n",
599
+ " total_val_loss = 0\n",
600
+ " all_preds = []\n",
601
+ " all_targets = []\n",
602
+ " total, correct = 0, 0\n",
603
+ " with torch.no_grad():\n",
604
+ " initial_batch = next(iter(val_loader))\n",
605
+ " initial_batch_size = initial_batch[0].size(0)\n",
606
+ " hidden = model.init_hidden(initial_batch_size)\n",
607
+ " \n",
608
+ " for inputs, labels in val_loader:\n",
609
+ " inputs, labels = inputs.to(device), labels.to(device)\n",
610
+ " current_batch_size = inputs.size(0)\n",
611
+ " \n",
612
+ " # Adjust hidden state size if current batch size differs from the initial batch size\n",
613
+ " if current_batch_size != initial_batch_size:\n",
614
+ " adjusted_hidden = (hidden[0][:, :current_batch_size, :].contiguous(),\n",
615
+ " hidden[1][:, :current_batch_size, :].contiguous())\n",
616
+ " else:\n",
617
+ " adjusted_hidden = hidden\n",
618
+ " \n",
619
+ " outputs, adjusted_hidden = model(inputs, adjusted_hidden, current_batch_size)\n",
620
+ " val_loss = criterion(outputs.transpose(1, 2), labels)\n",
621
+ " total_val_loss += val_loss.item()\n",
622
+ "\n",
623
+ " for i in range(outputs.shape[1]):\n",
624
+ " _, predicted = torch.max(outputs[:, i, :].data, 1)\n",
625
+ " total += labels[:, i].size(0)\n",
626
+ " correct += (predicted.cpu() == labels[:, i].cpu()).sum().item()\n",
627
+ " \n",
628
+ " all_preds.extend(predicted.cpu().numpy())\n",
629
+ " all_targets.extend(labels[:, i].cpu().numpy())\n",
630
+ "\n",
631
+ " average_val_loss = total_val_loss / len(val_loader)\n",
632
+ " validation_losses.append(average_val_loss) \n",
633
+ " accuracy = 100 * correct / total\n",
634
+ " scheduler.step(average_val_loss) # step on the epoch average, not the last batch loss\n",
635
+ " \n",
636
+ " precision, recall, f1, _ = precision_recall_fscore_support(np.array(all_targets).flatten(), np.array(all_preds).flatten(), average='weighted', zero_division=0)\n",
637
+ " \n",
638
+ " if f1 > best_f1:\n",
639
+ " best_f1 = f1\n",
640
+ " best_epoch = epoch\n",
641
+ " best_model_state_dict = model.state_dict()\n",
642
+ " best_all_targets = all_targets\n",
643
+ " best_all_preds = all_preds\n",
644
+ " \n",
645
+ " if (int(epoch) % 5) == 0:\n",
646
+ " print(f'Epoch [{epoch+1}/{num_epochs}], Training Loss: {average_train_loss:.4f}, Validation Loss: {average_val_loss:.4f}, Accuracy: {accuracy:.2f}%')\n",
647
+ " print(f'Epoch [{epoch+1}/{num_epochs}] Current Learning Rate: {current_lr}')\n",
648
+ " \n",
649
+ " \n",
650
+ " current_lr = optimizer.param_groups[0]['lr']\n",
651
+ " \n",
652
+ " if average_val_loss < best_val:\n",
653
+ " best_val = average_val_loss\n",
654
+ " time_to_break = 0\n",
655
+ " print('New best val loss: ', average_val_loss)\n",
656
+ " else:\n",
657
+ " time_to_break += 1\n",
658
+ " \n",
659
+ " if time_to_break > break_margin:\n",
660
+ " print('Break margin hit!')\n",
661
+ " break\n",
662
+ "\n",
663
+ " \n",
664
+ " return best_model_state_dict, best_all_targets, best_all_preds, validation_losses, training_losses\n",
665
+ "\n",
666
+ "\n",
667
+ "model = TempTAC(input_size=1024, hidden_size=256, output_dim=3, num_layers=2, device=device, dropout_prob=0.5)\n",
668
+ "model\n",
669
+ "\n",
670
+ "classes = np.unique(y_train.cpu())\n",
671
+ "weights = compute_class_weight(class_weight='balanced', classes=classes, y=y_train.cpu().numpy())\n",
672
+ "class_weights = torch.tensor(weights, dtype=torch.float32).to(device)\n",
673
+ "\n",
674
+ "optimizer = torch.optim.Adam(model.parameters(), lr=config['lr'], weight_decay=config['weight_decay'])\n",
675
+ "\n",
676
+ "criterion = nn.CrossEntropyLoss(weight=class_weights).to(device)\n",
677
+ "scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=4, verbose=True)\n",
678
+ "\n",
679
+ "best_model_state_dict, all_targets, all_preds, validation_losses, training_losses = train_model(model, train_loader, val_loader, criterion, optimizer, num_epochs=400)\n",
680
+ "\n",
681
+ "all_targets = np.array(all_targets).flatten()\n",
682
+ "all_preds = np.array(all_preds).flatten()\n",
683
+ "\n",
684
+ "model.load_state_dict(best_model_state_dict)\n",
685
+ "\n",
686
+ "print(classification_report(all_targets, all_preds, target_names=['background', 'tackle-live', 'tackle-replay']))\n"
687
+ ]
688
+ },
689
+ {
690
+ "cell_type": "markdown",
691
+ "id": "9d90b440-75a7-40d0-92f7-770973c52c48",
692
+ "metadata": {},
693
+ "source": [
694
+ "## Optimize hyperparams"
695
+ ]
696
+ },
697
+ {
698
+ "cell_type": "code",
699
+ "execution_count": null,
700
+ "id": "79d2914f-7397-42a8-9ba3-180b7aeba5cd",
701
+ "metadata": {},
702
+ "outputs": [],
703
+ "source": [
704
+ "import logging\n",
705
+ "import sys\n",
706
+ "import torch\n",
707
+ "import pandas as pd\n",
708
+ "import torch.nn as nn\n",
709
+ "from sklearn.metrics import classification_report\n",
710
+ "from torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau\n",
711
+ "\n",
712
+ "\n",
713
+ "def objective(trial, train_loader, val_loader, num_epochs=10, device=torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")):\n",
714
+ " \n",
715
+ " model = TempTAC(input_size=1024, hidden_size=256, output_dim=3, num_layers=2, device=device, dropout_prob=0.5)\n",
716
+ "\n",
717
+ " \n",
718
+ " device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
719
+ " model.to(device)\n",
720
+ " \n",
721
+ " training_losses, validation_losses = [], []\n",
722
+ " \n",
723
+ " best_val = float('inf')\n",
724
+ " break_margin = 3\n",
725
+ " best_f1 = 0.0\n",
726
+ " \n",
727
+ " # Updated to use suggest_float with log=True\n",
728
+ " lr = trial.suggest_float('lr', 1e-6, 1e-2, log=True)\n",
729
+ " weight_decay = trial.suggest_float('weight_decay', 1e-6, 1e-3, log=True)\n",
730
+ " l1_lambda = trial.suggest_float('l1_lambda', 0, 0.001)\n",
731
+ " \n",
732
+ " classes = np.unique(y_train.cpu())\n",
733
+ " weights = compute_class_weight(class_weight='balanced', classes=classes, y=y_train.cpu().numpy())\n",
734
+ " class_weights = torch.tensor(weights, dtype=torch.float32).to(device)\n",
735
+ "\n",
736
+ "\n",
737
+ " optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)\n",
738
+ " criterion = nn.CrossEntropyLoss(weight=class_weights).to(device) \n",
739
+ " scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=4, verbose=True)\n",
740
+ " \n",
741
+ " batch_size = 256\n",
742
+ " \n",
743
+ " current_lr = optimizer.param_groups[0]['lr']\n",
744
+ " \n",
745
+ " \n",
746
+ " for epoch in range(num_epochs):\n",
747
+ " model.train()\n",
748
+ " total_train_loss = 0\n",
749
+ "\n",
750
+ " # Initialize hidden state for the first batch size\n",
751
+ " initial_batch = next(iter(train_loader))\n",
752
+ " \n",
753
+ " initial_batch_size = initial_batch[0].size(0)\n",
754
+ " \n",
755
+ " hidden = model.init_hidden(initial_batch_size)\n",
756
+ " clip_value = 1\n",
757
+ " for inputs, labels in train_loader:\n",
758
+ " inputs, labels = inputs.to(device), labels.to(device)\n",
759
+ " current_batch_size = inputs.size(0)\n",
760
+ " \n",
761
+ " # Adjust hidden state size if current batch size differs from the initial batch size\n",
762
+ " if current_batch_size != initial_batch_size:\n",
763
+ " adjusted_hidden = (hidden[0][:, :current_batch_size, :].contiguous(),\n",
764
+ " hidden[1][:, :current_batch_size, :].contiguous())\n",
765
+ "\n",
766
+ " else:\n",
767
+ " adjusted_hidden = hidden\n",
768
+ " \n",
769
+ " outputs, adjusted_hidden = model(inputs, adjusted_hidden, current_batch_size)\n",
770
+ " \n",
771
+ " hidden = tuple([h.data for h in adjusted_hidden]) # Detach hidden state from graph to prevent backprop through entire dataset\n",
772
+ " \n",
773
+ " optimizer.zero_grad()\n",
774
+ " \n",
775
+ " # Calculate loss for the entire sequence at once\n",
776
+ " loss = criterion(outputs.transpose(1, 2), labels)\n",
777
+ " \n",
778
+ " #l1_norm = sum(torch.linalg.norm(p, 1) for p in model.parameters())\n",
779
+ " l1_norm = sum(p.abs().sum() for p in model.parameters())\n",
780
+ "\n",
781
+ " loss = loss + l1_lambda * l1_norm\n",
782
+ " \n",
783
+ " loss.backward()\n",
784
+ " torch.nn.utils.clip_grad_norm_(model.parameters(), clip_value) # gradient clipping\n",
785
+ " optimizer.step()\n",
786
+ " total_train_loss += loss.item()\n",
787
+ "\n",
788
+ " average_train_loss = total_train_loss / len(train_loader)\n",
789
+ " training_losses.append(average_train_loss) \n",
790
+ " \n",
791
+ " \n",
792
+ " model.eval()\n",
793
+ " total_val_loss = 0\n",
794
+ " all_preds = []\n",
795
+ " all_targets = []\n",
796
+ " total, correct = 0, 0\n",
797
+ " with torch.no_grad():\n",
798
+ " initial_batch = next(iter(val_loader))\n",
799
+ " initial_batch_size = initial_batch[0].size(0)\n",
800
+ " hidden = model.init_hidden(initial_batch_size)\n",
801
+ " \n",
802
+ " for inputs, labels in val_loader:\n",
803
+ " inputs, labels = inputs.to(device), labels.to(device)\n",
804
+ " current_batch_size = inputs.size(0)\n",
805
+ " \n",
806
+ " # Adjust hidden state size if current batch size differs from the initial batch size\n",
807
+ " if current_batch_size != initial_batch_size:\n",
808
+ " adjusted_hidden = (hidden[0][:, :current_batch_size, :].contiguous(),\n",
809
+ " hidden[1][:, :current_batch_size, :].contiguous())\n",
810
+ " else:\n",
811
+ " adjusted_hidden = hidden\n",
812
+ " \n",
813
+ " outputs, adjusted_hidden = model(inputs, adjusted_hidden, current_batch_size)\n",
814
+ " val_loss = criterion(outputs.transpose(1, 2), labels)\n",
815
+ " total_val_loss += val_loss.item()\n",
816
+ "\n",
817
+ " for i in range(outputs.shape[1]):\n",
818
+ " _, predicted = torch.max(outputs[:, i, :].data, 1)\n",
819
+ " total += labels[:, i].size(0)\n",
820
+ " correct += (predicted.cpu() == labels[:, i].cpu()).sum().item()\n",
821
+ " \n",
822
+ " all_preds.extend(predicted.cpu().numpy())\n",
823
+ " all_targets.extend(labels[:, i].cpu().numpy())\n",
824
+ "\n",
825
+ " average_val_loss = total_val_loss / len(val_loader)\n",
826
+ " validation_losses.append(average_val_loss) \n",
827
+ " accuracy = 100 * correct / total\n",
828
+ " scheduler.step(average_val_loss)\n",
829
+ " \n",
830
+ " precision, recall, f1, _ = precision_recall_fscore_support(np.array(all_targets).flatten(), np.array(all_preds).flatten(), average='weighted', zero_division=0)\n",
831
+ " \n",
832
+ " if f1 > best_f1:\n",
833
+ " best_f1 = f1\n",
834
+ " best_epoch = epoch\n",
835
+ " best_model_state_dict = model.state_dict()\n",
836
+ " best_all_targets = all_targets\n",
837
+ " best_all_preds = all_preds\n",
838
+ " \n",
839
+ " current_lr = optimizer.param_groups[0]['lr']\n",
840
+ " \n",
841
+ " if average_val_loss < best_val: # compare epoch averages, not the last batch loss\n",
842
+ " best_val = average_val_loss\n",
843
+ " time_to_break = 0\n",
844
+ " else:\n",
845
+ " time_to_break += 1\n",
846
+ " \n",
847
+ " if time_to_break > break_margin:\n",
848
+ " #print('Break margin hit!')\n",
849
+ " break\n",
850
+ " \n",
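+ " # Report intermediate F1 so Optuna's pruner can stop unpromising trials early\n",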
851
+ " trial.report(f1, epoch)\n",
852
+ " if trial.should_prune():\n",
853
+ " raise optuna.TrialPruned()\n",
854
+ " \n",
855
+ " \n",
856
+ "\n",
857
+ " \n",
858
+ " return f1\n",
859
+ "\n",
860
+ "\n",
861
+ "study = optuna.create_study(direction='maximize', sampler=optuna.samplers.TPESampler())\n",
862
+ "\n",
863
+ "optuna.logging.get_logger('optuna').addHandler(logging.StreamHandler(sys.stdout))\n",
864
+ "\n",
865
+ "study.optimize(lambda trial: objective(trial, train_loader, val_loader, num_epochs=1000, device=device), n_trials=100)\n",
866
+ "\n",
867
+ "\n",
868
+ "# Save study data to a df\n",
869
+ "study_data = study.trials_dataframe()\n",
870
+ "\n",
871
+ "# Save the df to a csv\n",
872
+ "study_data.to_csv('study_data.csv', index=False)\n",
873
+ "\n",
874
+ "print(\"Best trial:\")\n",
875
+ "trial = study.best_trial\n",
876
+ "print(f\" Value: {trial.value}\")\n",
877
+ "print(\" Params: \")\n",
878
+ "for key, value in trial.params.items():\n",
879
+ " print(f\" {key}: {value}\")"
880
+ ]
881
+ },
882
+ {
883
+ "cell_type": "markdown",
884
+ "id": "07e04c1d-2a4c-43f5-a0c8-1fb20f84959f",
885
+ "metadata": {},
886
+ "source": [
887
+ "## Read saved optuna-study"
888
+ ]
889
+ },
890
+ {
891
+ "cell_type": "code",
892
+ "execution_count": 27,
893
+ "id": "ce47bc92-5b33-4561-bbc9-edd10f6a86d7",
894
+ "metadata": {},
895
+ "outputs": [],
896
+ "source": [
897
+ "# Load the csv saved above\n",
898
+ "study_data = pd.read_csv('study_data.csv')\n",
899
+ "\n",
900
+ "# Find the best trial (higher value is better, so idxmax; use idxmin when minimizing)\n",
901
+ "best_trial = study_data.loc[study_data['value'].idxmax()]\n",
902
+ "\n",
903
+ "# Print info about best trial\n",
904
+ "print(\"Best Trial:\")\n",
905
+ "print(best_trial)\n",
906
+ "\n",
907
+ "# Print parameters of the best trial\n",
908
+ "print(\"\\nBest Trial Parameters:\")\n",
909
+ "for param in [col for col in study_data.columns if col.startswith('params_')]:\n",
910
+ " print(f\"{param.replace('params_', '')}: {best_trial[param]}\")\n"
911
+ ]
912
+ },
913
+ {
914
+ "cell_type": "code",
915
+ "execution_count": 1250,
916
+ "id": "afae28e4-417f-48a5-baf9-7e1be9aa3e15",
917
+ "metadata": {},
918
+ "outputs": [],
919
+ "source": [
920
+ "from sklearn.metrics import precision_recall_fscore_support, confusion_matrix, accuracy_score\n",
921
+ "conf_matrix = confusion_matrix(np.array(all_targets).flatten(), np.array(all_preds).flatten())\n",
922
+ "# conf_matrix = confusion_matrix(all_preds, all_targets)\n",
923
+ "labels = [\"background\", \"tackle-live\", \"tackle-replay\"]\n",
924
+ "#labels = [\"background\", \"tackle-live\", \"tackle-replay\", \"tackle-live-incomplete\", \"tackle-replay-incomplete\"]\n",
925
+ "\n",
926
+ " \n",
927
+ "sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues', xticklabels=labels, yticklabels=labels)\n",
928
+ "# plt.title('Confusion Matrix')\n",
929
+ "plt.xlabel('Predicted Label')\n",
930
+ "plt.ylabel('True Label')\n",
931
+ "#plt.savefig(\"new_best_8_context_frames_window_25.pdf\", format=\"pdf\", bbox_inches=\"tight\")\n",
932
+ "plt.show()"
933
+ ]
934
+ },
935
+ {
936
+ "cell_type": "markdown",
937
+ "id": "fa1b431a-63e7-4eb1-bca0-1d1c33ba0101",
938
+ "metadata": {
939
+ "tags": []
940
+ },
941
+ "source": [
942
+ "## Plot loss"
943
+ ]
944
+ },
945
+ {
946
+ "cell_type": "code",
947
+ "execution_count": 134,
948
+ "id": "1f6c292f-e5d9-4aad-a4df-fa0be0bb34d8",
949
+ "metadata": {},
950
+ "outputs": [],
951
+ "source": [
952
+ "scaler = MinMaxScaler()\n",
953
+ "\n",
954
+ "length=len(training_losses)\n",
955
+ "\n",
956
+ "# For better plotting, we normalize\n",
957
+ "training_losses_normalized = scaler.fit_transform(np.array(training_losses).reshape(-1, 1)).flatten()\n",
958
+ "validation_losses_normalized = scaler.fit_transform(np.array(validation_losses).reshape(-1, 1)).flatten()\n",
959
+ "\n",
960
+ "# Update df with normalized values\n",
961
+ "df = pd.DataFrame({\n",
962
+ " 'Epoch': np.arange(length),\n",
963
+ " 'Train Loss': training_losses_normalized,\n",
964
+ " 'Validation Loss': validation_losses_normalized\n",
965
+ "})\n",
966
+ "\n",
967
+ "# Plot normalized losses\n",
968
+ "plt.figure(figsize=(10, 6))\n",
969
+ "plt.plot(df['Epoch'], df['Train Loss'], label='Train Loss (Normalized)', marker='o')\n",
970
+ "plt.plot(df['Epoch'], df['Validation Loss'], label='Validation Loss (Normalized)', marker='o')\n",
971
+ "plt.title('Training vs Validation Loss (Normalized)')\n",
972
+ "plt.xlabel('Epoch')\n",
973
+ "plt.ylabel('Normalized Loss')\n",
974
+ "plt.legend()\n",
975
+ "plt.grid(True)\n",
976
+ "plt.tight_layout()\n",
977
+ "\n",
978
+ "# Save loss\n",
979
+ "#np.save('Optimized-reflected-run/training_losses-undersampled-window-75-sliding-window.npy', training_losses)\n",
980
+ "#np.save('Optimized-reflected-run/validation_losses-undersampled-window-75-sliding-window.npy', validation_losses)\n",
981
+ "#plt.savefig(f'Optimized-reflected-run/train_val_loss-undersampled-window-75-sliding-window.pdf', format='pdf')\n",
982
+ "\n",
983
+ "plt.show()"
984
+ ]
985
+ },
986
+ {
987
+ "cell_type": "markdown",
988
+ "id": "6d39fcce-8d36-42ed-ab48-2721e4d6dc20",
989
+ "metadata": {},
990
+ "source": [
991
+ "## Run inference on test-set"
992
+ ]
993
+ },
994
+ {
995
+ "cell_type": "code",
996
+ "execution_count": 153,
997
+ "id": "a03c40bf-6f14-4f48-a09a-5736ed2ce083",
998
+ "metadata": {},
999
+ "outputs": [],
1000
+ "source": [
1001
+ "# For loading prev models\n",
1002
+ "# model.load_state_dict(torch.load('Optimized-reflected-run/sliding-window/best_lstm-undersampled-window-25-sliding-window.pt'))\n",
1003
+ "\n",
1004
+ "def run_inference_test_set(): \n",
1005
+ " all_targets = []\n",
1006
+ " all_preds = []\n",
1007
+ "\n",
1008
+ " model.eval() # disable dropout during inference\n",
1009
+ "\n",
1010
+ " initial_batch = next(iter(test_loader))\n",
1011
+ "\n",
1012
+ " initial_batch_size = initial_batch[0].size(0)\n",
1013
+ "\n",
1014
+ " hidden = model.init_hidden(initial_batch_size)\n",
1015
+ "\n",
1016
+ " total_val_loss = 0\n",
1017
+ " total = 0\n",
1018
+ " correct = 0\n",
1019
+ "\n",
1020
+ " for inputs, labels in test_loader: # no enumerate: the index would be shadowed by the inner loop\n",
1021
+ " inputs, labels = inputs.to(device), labels.to(device)\n",
1022
+ " current_batch_size = inputs.size(0)\n",
1023
+ "\n",
1024
+ " # Adjust hidden state size if current batch size differs from the initial batch size\n",
1025
+ " if current_batch_size != initial_batch_size:\n",
1026
+ " adjusted_hidden = (hidden[0][:, :current_batch_size, :].contiguous(),\n",
1027
+ " hidden[1][:, :current_batch_size, :].contiguous())\n",
1028
+ " else:\n",
1029
+ " adjusted_hidden = hidden\n",
1030
+ "\n",
1031
+ "\n",
1032
+ "\n",
1033
+ " outputs, adjusted_hidden = model(inputs, adjusted_hidden, current_batch_size)\n",
1034
+ "\n",
1035
+ " val_loss = criterion(outputs.transpose(1, 2), labels)\n",
1036
+ " total_val_loss += val_loss.item()\n",
1037
+ "\n",
1038
+ " for i in range(outputs.shape[1]):\n",
1039
+ " _, predicted = torch.max(outputs[:, i, :].data, 1)\n",
1040
+ " total += labels[:, i].size(0)\n",
1041
+ " correct += (predicted.cpu() == labels[:, i].cpu()).sum().item()\n",
1042
+ "\n",
1043
+ " all_preds.extend(predicted.cpu().numpy())\n",
1044
+ " all_targets.extend(labels[:, i].cpu().numpy())\n",
1045
+ " return all_preds, all_targets \n",
1046
+ "\n",
1047
+ "\n",
1048
+ "test_predictions, test_targets = run_inference_test_set()\n",
1049
+ "\n",
1050
+ "\n",
1051
+ "# Print classification report\n",
1052
+ "print(classification_report(test_targets, test_predictions, target_names=['background', 'tackle-live', 'tackle-replay']))\n",
1053
+ "\n",
1054
+ "# Create CM\n",
1055
+ "conf_matrix = confusion_matrix(test_targets, test_predictions)\n",
1056
+ "\n",
1057
+ "\n",
1058
+ "labels = [\"background\", \"tackle-live\", \"tackle-replay\"]\n",
1059
+ "\n",
1060
+ " \n",
1061
+ "sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues', xticklabels=labels, yticklabels=labels)\n",
1062
+ "# plt.title('Confusion Matrix')\n",
1063
+ "plt.xlabel('Predicted Label')\n",
1064
+ "plt.ylabel('True Label')\n",
1065
+ "#plt.savefig(\"Optimized-reflected-run/best_lstm-undersampled-window-75-sliding-window.pdf\", format=\"pdf\", bbox_inches=\"tight\") \n",
1066
+ "plt.show()\n",
1067
+ "\n",
1068
+ "#torch.save(model.state_dict(), 'Optimized-reflected-run/best_lstm-undersampled-window-75-sliding-window.pt')"
1069
+ ]
1070
+ },
1071
+ {
1072
+ "cell_type": "markdown",
1073
+ "id": "e850e4dd-72e2-4c94-8938-483787968faf",
1074
+ "metadata": {},
1075
+ "source": [
1076
+ "## Print ROC-curves"
1077
+ ]
1078
+ },
1079
+ {
1080
+ "cell_type": "code",
1081
+ "execution_count": 154,
1082
+ "id": "bc0464b9-c3d7-4e4f-b821-7cca0b824b97",
1083
+ "metadata": {},
1084
+ "outputs": [],
1085
+ "source": [
1086
+ "from sklearn.metrics import roc_curve, auc\n",
1087
+ "from sklearn.preprocessing import label_binarize\n",
1088
+ "\n",
1089
+ "class_names = ['background', 'tackle-live', 'tackle-replay']\n",
1090
+ "n_classes = len(class_names)\n",
1091
+ "\n",
1092
+ "# binarize the targets and predictions for roc curve computation\n",
1093
+ "test_targets_bin = label_binarize(test_targets, classes=[0, 1, 2])\n",
1094
+ "test_predictions_bin = label_binarize(test_predictions, classes=[0, 1, 2])\n",
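+ "# Note: binarizing hard class predictions yields a single operating point per class; softmax probabilities would trace full ROC curves\n",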
1095
+ "\n",
1096
+ "# roc curve and auc for each class\n",
1097
+ "fpr = {}\n",
1098
+ "tpr = {}\n",
1099
+ "roc_auc = {}\n",
1100
+ "\n",
1101
+ "for i in range(n_classes):\n",
1102
+ " fpr[i], tpr[i], _ = roc_curve(test_targets_bin[:, i], test_predictions_bin[:, i])\n",
1103
+ " roc_auc[i] = auc(fpr[i], tpr[i])\n",
1104
+ "\n",
1105
+ "# plot ROC curves for each class\n",
1106
+ "plt.figure(figsize=(8, 6))\n",
1107
+ "for i in range(n_classes):\n",
1108
+ " plt.plot(fpr[i], tpr[i], label=f'{class_names[i]} (AUC = {roc_auc[i]:.2f})')\n",
1109
+ " \n",
1110
+ " \n",
1111
+ "plt.plot([0, 1], [0, 1], 'k--')\n",
1112
+ "plt.xlim([0.0, 1.0])\n",
1113
+ "plt.grid(visible=True)\n",
1114
+ "plt.ylim([0.0, 1.05])\n",
1115
+ "plt.xlabel('False Positive Rate')\n",
1116
+ "plt.ylabel('True Positive Rate')\n",
1117
+ "plt.title('Multi-class ROC Curve')\n",
1118
+ "plt.legend(loc='lower right')\n",
1119
+ "#plt.savefig(\"Optimized-reflected-run/roc-curve-25-sliding-window.pdf\", format=\"pdf\", bbox_inches=\"tight\")\n",
1120
+ "plt.show()"
1121
+ ]
1122
+ },
1123
+ {
1124
+ "cell_type": "markdown",
1125
+ "id": "796b2578-bc9d-40f9-80be-0dafa79f5920",
1126
+ "metadata": {},
1127
+ "source": [
1128
+ "## Baseline class"
1129
+ ]
1130
+ },
1131
+ {
1132
+ "cell_type": "code",
1133
+ "execution_count": 166,
1134
+ "id": "b1802e66",
1135
+ "metadata": {},
1136
+ "outputs": [],
1137
+ "source": [
1138
+ "class Simple1DCNN(nn.Module):\n",
1139
+ " def __init__(self, num_channels, num_classes):\n",
1140
+ " super(Simple1DCNN, self).__init__()\n",
1141
+ " self.conv1 = nn.Conv1d(in_channels=num_channels, out_channels=32, kernel_size=3, padding=1)\n",
1142
+ " self.fc1 = nn.Linear(32 * (sequence_length // 2), num_classes) # sequence_length must be defined in the enclosing scope; adjust for pooling and input length\n",
1143
+ "\n",
1144
+ " def forward(self, x):\n",
1145
+ " # x.shape = (batch, channels, sequence_length)\n",
1146
+ " x = F.relu(self.conv1(x))\n",
1147
+ " x = F.max_pool1d(x, kernel_size=2)\n",
1148
+ " \n",
1149
+ " # flatten to fully connected layer\n",
1150
+ " x = x.view(x.size(0), -1)\n",
1151
+ " \n",
1152
+ " x = self.fc1(x)\n",
1153
+ " return x"
1154
+ ]
1155
+ },
1156
+ {
1157
+ "cell_type": "markdown",
1158
+ "id": "2b6936c0-3ced-4a97-b7d7-f4c86afdc5d9",
1159
+ "metadata": {},
1160
+ "source": [
1161
+ "## Loss plotting"
1162
+ ]
1163
+ },
1164
+ {
1165
+ "cell_type": "code",
1166
+ "execution_count": 245,
1167
+ "id": "73975c4d-ca4c-454f-9a62-9849037c7426",
1168
+ "metadata": {},
1169
+ "outputs": [],
1170
+ "source": [
1171
+ "import pandas as pd\n",
1172
+ "import seaborn as sns\n",
1173
+ "import matplotlib.pyplot as plt\n",
1174
+ "\n",
1175
+ "epochs = len(training_losses) # number of epochs\n",
1176
+ "\n",
1177
+ "df = pd.DataFrame({\n",
1178
+ " 'Epoch': [i for i in range(epochs)],\n",
1179
+ " 'Train Loss': training_losses,\n",
1180
+ " 'Validation Loss': validation_losses\n",
1181
+ "})\n",
1182
+ "\n",
1183
+ "plt.figure(figsize=(10, 6))\n",
1184
+ "plt.plot(df['Epoch'], df['Train Loss'], label='Train Loss', marker='o')\n",
1185
+ "plt.plot(df['Epoch'], df['Validation Loss'], label='Validation Loss', marker='o')\n",
1186
+ "plt.title('Training vs Validation Loss')\n",
1187
+ "plt.xlabel('Epoch')\n",
1188
+ "plt.ylabel('Loss')\n",
1189
+ "plt.legend()\n",
1190
+ "plt.grid(True)\n",
1191
+ "plt.tight_layout()\n",
1192
+ "\n",
1193
+ "plt.savefig('baseline_cnn_training_validation_loss_plot.pdf', format='pdf')\n",
1194
+ "plt.show()"
1195
+ ]
1196
+ }
1197
+ ],
1198
+ "metadata": {
1199
+ "kernelspec": {
1200
+ "display_name": "Python (evan31818)",
1201
+ "language": "python",
1202
+ "name": "evan31818"
1203
+ },
1204
+ "language_info": {
1205
+ "codemirror_mode": {
1206
+ "name": "ipython",
1207
+ "version": 3
1208
+ },
1209
+ "file_extension": ".py",
1210
+ "mimetype": "text/x-python",
1211
+ "name": "python",
1212
+ "nbconvert_exporter": "python",
1213
+ "pygments_lexer": "ipython3",
1214
+ "version": "3.8.19"
1215
+ }
1216
+ },
1217
+ "nbformat": 4,
1218
+ "nbformat_minor": 5
1219
+ }
spatial-approach.ipynb ADDED
@@ -0,0 +1,1692 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "id": "11353711-2a7e-47a3-b2f0-287a6b5d2e99",
6
+ "metadata": {},
7
+ "source": [
8
+ "## Imports"
9
+ ]
10
+ },
11
+ {
12
+ "cell_type": "code",
13
+ "execution_count": null,
14
+ "id": "d217c9c8-a9be-4920-9196-48e20a98db56",
15
+ "metadata": {},
16
+ "outputs": [],
17
+ "source": [
18
+ "from sklearn.metrics import precision_recall_fscore_support, confusion_matrix, accuracy_score\n",
19
+ "from torch.utils.data import TensorDataset, DataLoader\n",
20
+ "from sklearn.model_selection import train_test_split\n",
21
+ "from sklearn.preprocessing import label_binarize\n",
22
+ "from sklearn.metrics import roc_curve, auc\n",
23
+ "from imblearn.over_sampling import SMOTE\n",
24
+ "from sklearn.decomposition import PCA\n",
25
+ "from collections import Counter\n",
26
+ "\n",
27
+ "import torch.nn.functional as F\n",
28
+ "import matplotlib.pyplot as plt\n",
29
+ "import torch.optim as optim\n",
30
+ "import torch.nn as nn\n",
31
+ "import seaborn as sns\n",
32
+ "import numpy as np\n",
33
+ "\n",
34
+ "import imblearn\n",
35
+ "import optuna\n",
36
+ "import torch\n",
37
+ "import json\n",
38
+ "import os\n",
39
+ "\n",
40
+ "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')"
41
+ ]
42
+ },
43
+ {
44
+ "cell_type": "markdown",
45
+ "id": "6270ebec-f9d2-4d5e-a146-7d16a149445e",
46
+ "metadata": {},
47
+ "source": [
48
+ "## Load CLS-tokens and map 'incomplete-classes' to their respective full classes"
49
+ ]
50
+ },
51
+ {
52
+ "cell_type": "code",
53
+ "execution_count": 6,
54
+ "id": "ae95e2b9-b741-4582-b36c-e47a05c56d4c",
55
+ "metadata": {},
56
+ "outputs": [],
57
+ "source": [
58
+ "X = torch.load('/home/evan/D1/project/code/sorted_cls_tokens_features.pt', map_location=device)\n",
59
+ "#X = torch.load('/home/evan/D1/project/code/stretched_cls_tokens.pt', map_location=device)\n",
60
+ "#X = torch.load('/home/evan/D1/project/code/reflected_cls_tokens.pt', map_location=device)\n",
61
+ "\n",
62
+ "y = np.load('/home/evan/D1/project/code/sorted_cls_tokens_labels.npy')\n",
63
+ "frame_counts = np.load('/home/evan/D1/project/code/frame_counts.npy')\n",
64
+ "\n",
65
+ "\n",
66
+ "class_mapping = {0:0, 1: 1, 2: 2, 3: 1, 4: 2}\n",
67
+ "\n",
68
+ "for i, label in enumerate(y):\n",
69
+ " y[i] = class_mapping[label]\n",
70
+ "print('Done')"
71
+ ]
72
+ },
73
+ {
74
+ "cell_type": "markdown",
75
+ "id": "2114fe95-f71c-47fc-a954-975cbfec28d4",
76
+ "metadata": {},
77
+ "source": [
78
+ "## Split into train, val, and test by game"
79
+ ]
80
+ },
81
+ {
82
+ "cell_type": "code",
83
+ "execution_count": 7,
84
+ "id": "458096f5-39fb-4dda-9c26-b580213cc22b",
85
+ "metadata": {},
86
+ "outputs": [],
87
+ "source": [
88
+ "# Calculate cumulative start indices of each video in the concatenated tensor\n",
89
+ "cumulative_starts = np.insert(np.cumsum(frame_counts), 0, 0)[:-1]\n",
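+ "# e.g. frame_counts [100, 50, 80] -> cumulative_starts [0, 100, 150]\n",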
90
+ "\n",
91
+ "\n",
92
+ "split_ratio = (0.7, 0.15, 0.15) #train, validation, test\n",
93
+ "\n",
94
+ "num_videos = len(frame_counts)\n",
95
+ "num_train_videos = int(num_videos * split_ratio[0])\n",
96
+ "num_val_videos = int(num_videos * split_ratio[1])\n",
97
+ "\n",
98
+ "# Ensure total does not exceed the number of videos\n",
99
+ "num_test_videos = num_videos - num_train_videos - num_val_videos\n",
100
+ "\n",
101
+ "# Shuffle video indices to split into training, validation, and test sets\n",
102
+ "video_indices = np.arange(num_videos)\n",
103
+ "np.random.seed(42)\n",
104
+ "np.random.shuffle(video_indices)\n",
105
+ "\n",
106
+ "train_video_indices = video_indices[:num_train_videos]\n",
107
+ "val_video_indices = video_indices[num_train_videos:num_train_videos + num_val_videos]\n",
108
+ "test_video_indices = video_indices[num_train_videos + num_val_videos:]\n",
109
+ "\n",
110
+ "# Initialize lists for indices\n",
111
+ "train_indices, val_indices, test_indices = [], [], []\n",
112
+ "\n",
113
+ "# Populate the index lists\n",
114
+ "for idx in train_video_indices:\n",
115
+ " start, end = cumulative_starts[idx], cumulative_starts[idx] + frame_counts[idx]\n",
116
+ " train_indices.extend(range(start, end))\n",
117
+ "\n",
118
+ "for idx in val_video_indices:\n",
119
+ " start, end = cumulative_starts[idx], cumulative_starts[idx] + frame_counts[idx]\n",
120
+ " val_indices.extend(range(start, end))\n",
121
+ "\n",
122
+ "for idx in test_video_indices:\n",
123
+ " start, end = cumulative_starts[idx], cumulative_starts[idx] + frame_counts[idx]\n",
124
+ " test_indices.extend(range(start, end))\n",
125
+ "\n",
126
+ "# Convert indices to tensors and extract corresponding subsets\n",
127
+ "train_indices = torch.tensor(train_indices)\n",
128
+ "val_indices = torch.tensor(val_indices)\n",
129
+ "test_indices = torch.tensor(test_indices)\n",
130
+ "\n",
131
+ "X_train, y_train = X[train_indices], y[train_indices]\n",
132
+ "X_val, y_val = X[val_indices], y[val_indices]\n",
133
+ "X_test, y_test = X[test_indices], y[test_indices]"
134
+ ]
135
+ },
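+ {
+ "cell_type": "markdown",
+ "id": "0f3a1c2e-aaaa-4e2e-9a3b-000000000001",
+ "metadata": {},
+ "source": [
+ "The offset arithmetic above is easiest to verify on a toy input (the counts below are made up, not from the dataset): with per-video frame counts `[3, 2, 4]`, the start offsets are `[0, 3, 5]`, so video 1 occupies rows 3 and 4 of the concatenated tensor."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0f3a1c2e-aaaa-4e2e-9a3b-000000000002",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Toy check of the start-offset computation used above (hypothetical counts)\n",
+ "import numpy as np\n",
+ "\n",
+ "toy_counts = np.array([3, 2, 4])\n",
+ "toy_starts = np.insert(np.cumsum(toy_counts), 0, 0)[:-1]\n",
+ "print(toy_starts) # [0 3 5]\n",
+ "start, end = toy_starts[1], toy_starts[1] + toy_counts[1]\n",
+ "print(list(range(start, end))) # frames of video 1 -> [3, 4]"
+ ]
+ },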
136
+ {
137
+ "cell_type": "markdown",
138
+ "id": "b97efc93-d7e0-4a2a-aa36-ef2ade8a60e3",
139
+ "metadata": {},
140
+ "source": [
141
+ "## Undersample and create undersampled train, val, test datasets"
142
+ ]
143
+ },
144
+ {
145
+ "cell_type": "code",
146
+ "execution_count": 8,
147
+ "id": "3af2b8d2-014b-439f-bf68-5d59d64c2812",
148
+ "metadata": {},
149
+ "outputs": [],
150
+ "source": [
151
+ "def undersample_data(X, y, target_counts):\n",
152
+ " unique_classes, counts = np.unique(y, return_counts=True)\n",
153
+ " undersampled_indices = []\n",
154
+ " print(counts)\n",
155
+ " \n",
156
+ "\n",
157
+ " for cls in unique_classes:\n",
158
+ " if cls == 0 or cls == 2 or cls == 1:\n",
159
+ " cls_indices = np.where(y == cls)[0]\n",
160
+ "\n",
161
+ " undersampled_cls_indices = np.random.choice(cls_indices, target_counts[int(cls)], replace=False)\n",
162
+ " undersampled_indices.extend(undersampled_cls_indices)\n",
163
+ " else:\n",
164
+ " cls_indices = np.where(y == cls)[0]\n",
165
+ " \n",
166
+ " max_count = len(cls_indices)\n",
167
+ "\n",
168
+ " undersampled_cls_indices = np.random.choice(cls_indices, max_count, replace=False)\n",
169
+ " undersampled_indices.extend(undersampled_cls_indices)\n",
170
+ "\n",
171
+ " np.random.shuffle(undersampled_indices) # Shuffle to mix classes\n",
172
+ " X_undersampled = X[undersampled_indices]\n",
173
+ " y_undersampled = y[undersampled_indices]\n",
174
+ "\n",
175
+ " return X_undersampled, y_undersampled\n",
176
+ "\n",
177
+ "np.random.seed(42) # Set a specific random seed for reproducibility\n",
178
+ "\n",
179
+ "#X_train, y_train = undersample_data(X_train, y_train, {0: 2750, 1: 2750, 2: 5500})\n",
180
+ "X_train, y_train = undersample_data(X_train, y_train, {0: 4000, 1: 4000, 2: 4000})\n",
181
+ "\n",
182
+ "\n",
183
+ "#X_val, y_val = undersample_data(X_val, y_val, {0: 688, 1: 688, 2: 688})\n",
184
+ "#X_val, y_val = undersample_data(X_val, y_val, {0: 500, 1: 500, 2: 500})\n",
185
+ "X_val, y_val = undersample_data(X_val, y_val, {0: 1000, 1: 1000, 2: 1000})\n",
186
+ "\n",
187
+ "\n",
188
+ "\n",
189
+ "#X_test, y_test = undersample_data(X_test, y_test, {0: 688, 1: 688, 2: 688})\n",
190
+ "#X_test, y_test = undersample_data(X_test, y_test, {0: 500, 1: 500, 2: 500})\n",
191
+ "X_test, y_test = undersample_data(X_test, y_test, {0: 1000, 1: 1000, 2: 1000})"
192
+ ]
193
+ },
194
+ {
195
+ "cell_type": "markdown",
196
+ "id": "d0902a93-f7e4-4b00-8e1c-50e860f045cc",
197
+ "metadata": {},
198
+ "source": [
199
+ "## Check counts"
200
+ ]
201
+ },
202
+ {
203
+ "cell_type": "code",
204
+ "execution_count": 9,
205
+ "id": "c4ffc18f-b5c6-4932-98d4-38e926c3ca4f",
206
+ "metadata": {},
207
+ "outputs": [],
208
+ "source": [
209
+ "print(np.unique(y_train, return_counts=True))\n",
210
+ "print(np.unique(y_val, return_counts=True)) \n",
211
+ "print(np.unique(y_test, return_counts=True))"
212
+ ]
213
+ },
214
+ {
215
+ "cell_type": "markdown",
216
+ "id": "7be26503-cbdf-490c-bac7-9df3604b10fc",
217
+ "metadata": {},
218
+ "source": [
219
+ "## Move all to device"
220
+ ]
221
+ },
222
+ {
223
+ "cell_type": "code",
224
+ "execution_count": 10,
225
+ "id": "cd691b16-56a1-4bbb-bb66-e39b292263c1",
226
+ "metadata": {},
227
+ "outputs": [],
228
+ "source": [
229
+ "X_train = torch.tensor(X_train, dtype=torch.float32, device=device)\n",
230
+ "y_train = torch.tensor(y_train, dtype=torch.long, device=device)\n",
231
+ "\n",
232
+ "X_val = torch.tensor(X_val, dtype=torch.float32, device=device)\n",
233
+ "y_val = torch.tensor(y_val, dtype=torch.long, device=device)\n",
234
+ "\n",
235
+ "X_test = torch.tensor(X_test, dtype=torch.float32, device=device)\n",
236
+ "y_test = torch.tensor(y_test, dtype=torch.long, device=device)"
237
+ ]
238
+ },
239
+ {
240
+ "cell_type": "markdown",
241
+ "id": "e4c6da28-32d2-4979-baa3-274d8304eb89",
242
+ "metadata": {},
243
+ "source": [
244
+ "## Create dataloaders for train, val, test"
245
+ ]
246
+ },
247
+ {
248
+ "cell_type": "code",
249
+ "execution_count": 11,
250
+ "id": "7084da3b-f3ff-42be-afcc-8dc30d4ef3e2",
251
+ "metadata": {},
252
+ "outputs": [],
253
+ "source": [
254
+ "batch_size = 128\n",
255
+ "\n",
256
+ "train_dataset = TensorDataset(X_train, y_train)\n",
257
+ "train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)\n",
258
+ "\n",
259
+ "val_dataset = TensorDataset(X_val, y_val)\n",
260
+ "val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)\n",
261
+ "\n",
262
+ "test_dataset = TensorDataset(X_test, y_test)\n",
263
+ "test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)"
264
+ ]
265
+ },
266
+ {
267
+ "cell_type": "markdown",
268
+ "id": "9121e433-74be-4391-a60a-f30f18005259",
269
+ "metadata": {
270
+ "tags": []
271
+ },
272
+ "source": [
273
+ "## Set folder-name to save data"
274
+ ]
275
+ },
276
+ {
277
+ "cell_type": "code",
278
+ "execution_count": 12,
279
+ "id": "0552af31-7025-4afd-b639-069e6704ca0b",
280
+ "metadata": {},
281
+ "outputs": [],
282
+ "source": [
283
+ "folder = '/home/evan/D1/project/code/smaller_model/raw/'\n",
284
+ "\n",
285
+ "if not os.path.exists(folder):\n",
286
+ " os.makedirs(folder, exist_ok=True)"
287
+ ]
288
+ },
289
+ {
290
+ "cell_type": "markdown",
291
+ "id": "d4bdb1c0-4bac-4092-aee3-bd7833fc0036",
292
+ "metadata": {},
293
+ "source": [
294
+ "## Create and initalize model"
295
+ ]
296
+ },
297
+ {
298
+ "cell_type": "code",
299
+ "execution_count": 13,
300
+ "id": "6f34a0e4-1fe8-45e9-a459-07feee5cde69",
301
+ "metadata": {},
302
+ "outputs": [],
303
+ "source": [
304
+ "import torch\n",
305
+ "import torch.nn as nn\n",
306
+ "import torch.nn.functional as F\n",
307
+ "\n",
308
+ "class BaseModel(nn.Module):\n",
309
+ " def __init__(self):\n",
310
+ " super(BaseModel, self).__init__()\n",
311
+ " \n",
312
+ " self.fc1 = nn.Linear(1024, 64)\n",
313
+ " self.dropout1 = nn.Dropout(0.4)\n",
314
+ " self.bn1 = nn.BatchNorm1d(64)\n",
315
+ " \n",
316
+ " self.fc2 = nn.Linear(64, 32)\n",
317
+ " self.dropout2 = nn.Dropout(0.3)\n",
318
+ " self.bn2 = nn.BatchNorm1d(32)\n",
319
+ " \n",
320
+ " \n",
321
+ " self.fc3 = nn.Linear(32, 16)\n",
322
+ " self.dropout3 = nn.Dropout(0.2)\n",
323
+ " self.bn3 = nn.BatchNorm1d(16)\n",
324
+ " \n",
325
+ " \n",
326
+ " self.fc4 = nn.Linear(16, 3)\n",
327
+ "\n",
328
+ " def forward(self, x):\n",
329
+ " x = F.relu(self.bn1(self.fc1(x)))\n",
330
+ " x = self.dropout1(x)\n",
331
+ " \n",
332
+ " x = F.relu(self.bn2(self.fc2(x)))\n",
333
+ " x = self.dropout2(x)\n",
334
+ " \n",
335
+ " x = F.relu(self.bn3(self.fc3(x)))\n",
336
+ " x = self.dropout3(x)\n",
337
+ " \n",
338
+ " x = self.fc4(x)\n",
339
+ " return x\n",
340
+ "\n",
341
+ "model = BaseModel()\n",
342
+ "print(model)\n"
343
+ ]
344
+ },
345
+ {
346
+ "cell_type": "markdown",
347
+ "id": "c4e9289a-d03b-40ce-961f-07b4d5e61bce",
348
+ "metadata": {},
349
+ "source": [
350
+ "## Check model parameters"
351
+ ]
352
+ },
353
+ {
354
+ "cell_type": "code",
355
+ "execution_count": 14,
356
+ "id": "7142cdef-fa50-4c21-aab2-39b7d54b4e17",
357
+ "metadata": {},
358
+ "outputs": [],
359
+ "source": [
360
+ "def count_parameters(model):\n",
361
+ " return sum(p.numel() for p in model.parameters() if p.requires_grad)\n",
362
+ "\n",
363
+ "#model = EnhancedMultiLayerClassifier(1024, 3)\n",
364
+ "print(\"Number of trainable parameters:\", count_parameters(model))\n"
365
+ ]
366
+ },
367
+ {
368
+ "cell_type": "markdown",
369
+ "id": "2b9c4905-3eae-44be-8aec-968bd0ed735e",
370
+ "metadata": {
371
+ "tags": []
372
+ },
373
+ "source": [
374
+ "## L1-regularization"
375
+ ]
376
+ },
377
+ {
378
+ "cell_type": "code",
379
+ "execution_count": 15,
380
+ "id": "8b6b7352-6e2b-4a74-8289-122d8d521382",
381
+ "metadata": {},
382
+ "outputs": [],
383
+ "source": [
384
+ "def l1_regularization(model, lambda_l1):\n",
385
+ " l1_penalty = torch.tensor(0., device=device) \n",
386
+ " for param in model.parameters():\n",
387
+ " l1_penalty += torch.norm(param, 1)\n",
388
+ " return lambda_l1 * l1_penalty"
389
+ ]
390
+ },
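+ {
+ "cell_type": "markdown",
+ "id": "0f3a1c2e-aaaa-4e2e-9a3b-000000000003",
+ "metadata": {},
+ "source": [
+ "With this helper, the training objective below becomes $\\mathcal{L} = \\mathcal{L}_{CE} + \\lambda_1 \\sum_i \\lvert \\theta_i \\rvert$: cross-entropy plus an L1 penalty over all trainable parameters, which pushes weights toward zero and acts as a sparsity-inducing regularizer."
+ ]
+ },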
391
+ {
392
+ "cell_type": "markdown",
393
+ "id": "430e52ef-e84c-4c4b-8567-2a4376bc3bf6",
394
+ "metadata": {
395
+ "tags": []
396
+ },
397
+ "source": [
398
+ "## Training loop"
399
+ ]
400
+ },
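+ {
+ "cell_type": "markdown",
+ "id": "0f3a1c2e-aaaa-4e2e-9a3b-000000000004",
+ "metadata": {},
+ "source": [
+ "The training loop below can select `FocalLoss`, but no `FocalLoss` class is defined in this notebook. The next cell is a minimal sketch of the standard multi-class focal loss (Lin et al., 2017) matching the `alpha`/`gamma`/`reduction` signature used below; it is an assumption about the missing implementation, not the original code."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0f3a1c2e-aaaa-4e2e-9a3b-000000000005",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Minimal focal-loss sketch (assumed formulation; the original class is not in this notebook)\n",
+ "import torch\n",
+ "import torch.nn as nn\n",
+ "import torch.nn.functional as F\n",
+ "\n",
+ "class FocalLoss(nn.Module):\n",
+ " def __init__(self, alpha=1.0, gamma=2.0, reduction='mean'):\n",
+ " super(FocalLoss, self).__init__()\n",
+ " self.alpha = alpha\n",
+ " self.gamma = gamma\n",
+ " self.reduction = reduction\n",
+ "\n",
+ " def forward(self, inputs, targets):\n",
+ " # Per-sample cross-entropy, down-weighted for well-classified samples\n",
+ " ce_loss = F.cross_entropy(inputs, targets, reduction='none')\n",
+ " pt = torch.exp(-ce_loss) # probability assigned to the true class\n",
+ " focal_loss = self.alpha * (1.0 - pt) ** self.gamma * ce_loss\n",
+ " if self.reduction == 'mean':\n",
+ " return focal_loss.mean()\n",
+ " if self.reduction == 'sum':\n",
+ " return focal_loss.sum()\n",
+ " return focal_loss"
+ ]
+ },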
401
+ {
402
+ "cell_type": "code",
403
+ "execution_count": 542,
404
+ "id": "8bb58a7d-66d2-46e5-9281-e1476d6f9da3",
405
+ "metadata": {},
406
+ "outputs": [],
407
+ "source": [
408
+ "config = [\n",
409
+ "{\n",
410
+ "'lr': 0.00023189475417053056, 'weight_decay': 0.06013631013820486, 'lambda_l1': 7.530339626757409e-05,\n",
411
+ "'epochs': 80,\n",
412
+ "'break_margin': 2,\n",
413
+ "'loss_function': 'CrossEntropy', # or 'FocalLoss'\n",
414
+ "'alpha': 0.9186381075849595, # Only relevant if using FocalLoss\n",
415
+ "'gamma': 0.2157540954710035 # Only relevant if using FocalLoss\n",
416
+ "}]\n",
417
+ "\n",
418
+ "def run_experiment(config):\n",
419
+ " #model = EnhancedMultiLayerClassifier(1024, 3).to(device)\n",
420
+ " model = BaseModel().to(device)\n",
421
+ " \n",
422
+ " if config['loss_function'] == 'CrossEntropy':\n",
423
+ " criterion = nn.CrossEntropyLoss().to(device)\n",
424
+ " \n",
425
+ " elif config['loss_function'] == 'FocalLoss':\n",
426
+ " # Assuming FocalLoss is defined elsewhere and compatible with your requirements\n",
427
+ " criterion = FocalLoss(alpha=config['alpha'], gamma=config['gamma'], reduction='mean').to(device)\n",
428
+ " \n",
429
+ " optimizer = optim.Adam(model.parameters(), lr=config['lr'], weight_decay=config['weight_decay'])\n",
430
+ " epochs = config['epochs']\n",
431
+ " break_margin = config['break_margin']\n",
432
+ " best_f1 = 0.0\n",
433
+ " time_to_break = 0\n",
434
+ " best_loss = float('inf')\n",
435
+ " train_losses, val_losses = [], []\n",
436
+ " output_file_path = os.path.join(folder, 'training_output_base.txt')\n",
437
+ "\n",
438
+ " \n",
439
+ " with open(output_file_path, 'w') as f:\n",
440
+ " for epoch in range(epochs):\n",
441
+ " model.train()\n",
442
+ " train_loss = 0\n",
443
+ " for X_batch, y_batch in train_loader:\n",
444
+ " X_batch, y_batch = X_batch.to(device), y_batch.to(device)\n",
445
+ " optimizer.zero_grad()\n",
446
+ " outputs = model(X_batch)\n",
447
+ " loss = criterion(outputs, y_batch)\n",
448
+ "\n",
449
+ " # Calculate L1 regularization penalty to prevent overfitting\n",
450
+ " l1_penalty = l1_regularization(model, config['lambda_l1'])\n",
451
+ "\n",
452
+ " # Add L1 penalty to the loss\n",
453
+ " loss += l1_penalty\n",
454
+ "\n",
455
+ " loss.backward()\n",
456
+ " optimizer.step()\n",
457
+ " train_loss += loss.item()\n",
458
+ " train_losses.append(train_loss / len(train_loader))\n",
459
+ "\n",
460
+ " model.eval()\n",
461
+ " val_loss = 0\n",
462
+ " all_preds, all_targets, all_outputs = [], [], []\n",
463
+ " with torch.no_grad():\n",
464
+ " for X_batch, y_batch in val_loader:\n",
465
+ " X_batch, y_batch = X_batch.to(device), y_batch.to(device)\n",
466
+ " outputs = model(X_batch)\n",
467
+ " loss = criterion(outputs, y_batch)\n",
468
+ " val_loss += loss.item()\n",
469
+ " _, predicted = torch.max(outputs.data, 1)\n",
470
+ " all_preds.extend(predicted.cpu().numpy())\n",
471
+ " all_targets.extend(y_batch.cpu().numpy())\n",
472
+ " all_outputs.extend(outputs.cpu().numpy())\n",
473
+ " val_losses.append(val_loss / len(val_loader))\n",
474
+ "\n",
475
+ " #precision_0, recall_0, f1_0, _ = precision_recall_fscore_support(all_targets, all_preds, average='weighted', zero_division=0)\n",
476
+ " precision_0, recall_0, f1_0, _ = precision_recall_fscore_support(all_targets, all_preds, labels=[2], average='macro', zero_division=0)\n",
477
+ " accuracy = accuracy_score(all_targets, all_preds)\n",
478
+ " \n",
479
+ " \n",
480
+ " output_str = f'Epoch {epoch+1}: Train Loss: {train_losses[-1]:.4f}, Val Loss: {val_losses[-1]:.4f}, Precision: {precision_0:.4f}, Recall: {recall_0:.4f}, F1: {f1_0:.4f}, Accuracy: {accuracy:.4f}\\n'\n",
481
+ " #f.write(output_str)\n",
482
+ " print(output_str, end='')\n",
483
+ " \n",
484
+ " \n",
485
+ " # Save the model if the f1 of the current epoch is the best\n",
486
+ " if f1_0 > best_f1:\n",
487
+ " best_f1 = f1_0\n",
488
+ " best_epoch = epoch\n",
489
+ " best_model_state_dict = model.state_dict()\n",
490
+ " best_all_targets = all_targets\n",
491
+ " best_all_preds = all_preds\n",
492
+ " # Define path for saving the model\n",
493
+ " best_model_path = os.path.join(folder, 'best_model_for_class_test.pt')\n",
494
+ " \n",
495
+ " if val_loss < best_loss:\n",
496
+ " best_loss = val_loss\n",
497
+ " time_to_break = 0\n",
498
+ " else:\n",
499
+ " time_to_break += 1\n",
500
+ " if time_to_break == break_margin:\n",
501
+ " print('Break margin hit')\n",
502
+ " break\n",
503
+ "\n",
504
+ " \n",
505
+ " return best_model_state_dict, best_all_targets, best_all_preds\n",
506
+ "\n",
507
+ "for config_index, config_ in enumerate(config):\n",
508
+ " print(f'Running configuration {config_index + 1}/{len(config)}')\n",
509
+ " best_model_state_dict, all_targets, all_preds = run_experiment(config_)\n",
510
+ "model.load_state_dict(best_model_state_dict)"
511
+ ]
512
+ },
513
+ {
514
+ "cell_type": "markdown",
515
+ "id": "fc553e10-5f2b-465d-be1f-db80c2463272",
516
+ "metadata": {},
517
+ "source": [
518
+ "## Check entropy/mutual information in features"
519
+ ]
520
+ },
521
+ {
522
+ "cell_type": "code",
523
+ "execution_count": 69,
524
+ "id": "f109848e-7ec5-43f1-9c2f-74b5598aeba6",
525
+ "metadata": {},
526
+ "outputs": [],
527
+ "source": [
528
+ "from sklearn.feature_selection import mutual_info_classif\n",
529
+ "\n",
530
+ "mi_scores = mutual_info_classif(X_val.cpu(), y_val)\n",
531
+ "\n",
532
+ "\n",
533
+ "# Calculate the average mutual information per feature\n",
534
+ "average_mi = np.mean(mi_scores)\n",
535
+ "print(\"Average Mutual Information per feature:\", average_mi)\n",
536
+ "\n",
537
+ "plt.bar(range(len(mi_scores)), mi_scores, edgecolor='none')\n",
538
+ "plt.xlabel('Features')\n",
539
+ "plt.ylabel('Mutual Information Score')\n",
540
+ "plt.title('MI Scores for Zero Padded Frame Features')\n",
541
+ "#plt.savefig(\"padded_mutual_information_feature.pdf\", format=\"pdf\", bbox_inches=\"tight\")\n",
542
+ "plt.show()"
543
+ ]
544
+ },
545
+ {
546
+ "cell_type": "markdown",
547
+ "id": "fbb1330f-3288-43fe-9f0e-873fe9d10bae",
548
+ "metadata": {},
549
+ "source": [
550
+ "## Optuna optimalization"
551
+ ]
552
+ },
553
+ {
554
+ "cell_type": "code",
555
+ "execution_count": 36,
556
+ "id": "51110ed4-0ee8-4642-b177-0b1343c021dd",
557
+ "metadata": {},
558
+ "outputs": [],
559
+ "source": [
560
+ "import logging\n",
561
+ "import sys\n",
562
+ "import time\n",
563
+ "\n",
564
+ "SEED = 13\n",
565
+ "torch.manual_seed(SEED)\n",
566
+ "\n",
567
+ "\n",
568
+ "def objective(trial):\n",
569
+ " lr = trial.suggest_float('lr', 1e-5, 1e-1, log=True)\n",
570
+ " weight_decay = trial.suggest_float(\"weight_decay\", 0, 0.1)\n",
571
+ " lambda_l1 = trial.suggest_float('lambda_l1', 0, 1e-2)\n",
572
+ " #gamma = trial.suggest_float('gamma', 0, 2)\n",
573
+ " #alpha = trial.suggest_float('alpha', 0, 1)\n",
574
+ " \n",
575
+ " model = BaseModel().to(device)\n",
576
+ "\n",
577
+ " \n",
578
+ " criterion = nn.CrossEntropyLoss().to(device)\n",
579
+ " \n",
580
+ " optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)\n",
581
+ " epochs = 40\n",
582
+ " best_val_f1 = 0\n",
583
+ " epochs_no_improve = 0 \n",
584
+ " \n",
585
+ " # Now I do. with smaller model!\n",
586
+ " early_stop_threshold = 2\n",
587
+ " \n",
588
+ " train_losses, val_losses = [], []\n",
589
+ " #output_file_path = os.path.join(folder, 'base_training_output.txt')\n",
590
+ "\n",
591
+ " for epoch in range(epochs):\n",
592
+ " model.train()\n",
593
+ " train_loss = 0\n",
594
+ " for X_batch, y_batch in train_loader:\n",
595
+ " X_batch, y_batch = X_batch.to(device), y_batch.to(device)\n",
596
+ " optimizer.zero_grad()\n",
597
+ " outputs = model(X_batch)\n",
598
+ " loss = criterion(outputs, y_batch)\n",
599
+ "\n",
600
+ " # Calculate L1 regularization penalty to prevent overfitting\n",
601
+ " l1_penalty = l1_regularization(model, lambda_l1)\n",
602
+ "\n",
603
+ " # Add L1 penalty to the loss\n",
604
+ " loss += l1_penalty\n",
605
+ "\n",
606
+ " loss.backward()\n",
607
+ " optimizer.step()\n",
608
+ " train_loss += loss.item()\n",
609
+ " train_losses.append(train_loss / len(train_loader))\n",
610
+ "\n",
611
+ " model.eval()\n",
612
+ " val_loss = 0\n",
613
+ " all_preds, all_targets, all_outputs = [], [], []\n",
614
+ " with torch.no_grad():\n",
615
+ " for X_batch, y_batch in val_loader:\n",
616
+ " X_batch, y_batch = X_batch.to(device), y_batch.to(device)\n",
617
+ " outputs = model(X_batch)\n",
618
+ " loss = criterion(outputs, y_batch)\n",
619
+ " val_loss += loss.item()\n",
620
+ " _, predicted = torch.max(outputs.data, 1)\n",
621
+ " all_preds.extend(predicted.cpu().numpy())\n",
622
+ " all_targets.extend(y_batch.cpu().numpy())\n",
623
+ " all_outputs.extend(outputs.cpu().numpy())\n",
624
+ " val_losses.append(val_loss / len(val_loader))\n",
625
+ "\n",
626
+ " precision_0, recall_0, f1_0, _ = precision_recall_fscore_support(all_targets, all_preds, average='weighted', zero_division=0)\n",
627
+ " #precision_0, recall_0, f1_0, _ = precision_recall_fscore_support(all_targets, all_preds, labels=[2], average='macro', zero_division=0)\n",
628
+ " accuracy = accuracy_score(all_targets, all_preds)\n",
629
+ " \n",
630
+ " if f1_0 > best_val_f1:\n",
631
+ " best_val_f1 = f1_0\n",
632
+ " epochs_no_improve = 0\n",
633
+ " else:\n",
634
+ " epochs_no_improve += 1\n",
635
+ "\n",
636
+ " if epochs_no_improve >= early_stop_threshold:\n",
637
+ " print(\"Stopping early due to no improvement\")\n",
638
+ " break\n",
639
+ " trial.report(f1_0, epoch)\n",
640
+ " if trial.should_prune():\n",
641
+ " raise optuna.TrialPruned()\n",
642
+ " return f1_0\n",
643
+ "\n",
644
+ "\n",
645
+ "#optuna.logging.get_logger('optuna').addHandler(logging.StreamHandler(sys.stdout))\n",
646
+ "study = optuna.create_study(direction='maximize', sampler=optuna.samplers.TPESampler(seed=SEED))\n",
647
+ "\n",
648
+ "\n",
649
+ "\n",
650
+ "\n",
651
+ "\n",
652
+ "\n",
653
+ "\n",
654
+ "start_time = time.time()\n",
655
+ "study.optimize(objective, n_trials=150)\n",
656
+ "end_time = time.time()\n",
657
+ "elapsed_time = end_time - start_time\n",
658
+ "\n",
659
+ "print(f\"Optimization took {elapsed_time:.2f} seconds.\")\n"
660
+ ]
661
+ },
662
+ {
663
+ "cell_type": "markdown",
664
+ "id": "4e81ee90-ddc9-430a-b526-02ce5fc8ed49",
665
+ "metadata": {
666
+ "tags": []
667
+ },
668
+ "source": [
669
+ "### Evaluate best model on test test"
670
+ ]
671
+ },
672
+ {
673
+ "cell_type": "code",
674
+ "execution_count": 202,
675
+ "id": "ece25538-1719-45b8-b53a-077b99370761",
676
+ "metadata": {},
677
+ "outputs": [],
678
+ "source": [
679
+ "#model.load_state_dict(torch.load(os.path.join(folder, 'best_model_for_class_test.pt')))\n",
680
+ "#model.to(device)\n",
681
+ "\n",
682
+ "model.eval()\n",
683
+ "test_loss = 0\n",
684
+ "all_preds = []\n",
685
+ "all_targets = []\n",
686
+ "\n",
687
+ "\n",
688
+ "with torch.no_grad(): \n",
689
+ " for X_batch, y_batch in test_loader:\n",
690
+ " X_batch, y_batch = X_batch.to(device), y_batch.to(device)\n",
691
+ " outputs = model(X_batch)\n",
692
+ " loss = criterion(outputs, y_batch)\n",
693
+ " test_loss += loss.item()\n",
694
+ " _, predicted = torch.max(outputs.data, 1)\n",
695
+ " all_preds.extend(predicted.cpu().numpy())\n",
696
+ " all_targets.extend(y_batch.cpu().numpy())\n",
697
+ "\n",
698
+ "test_loss /= len(val_loader)\n",
699
+ "precision, recall, f1, _ = precision_recall_fscore_support(all_targets, all_preds, average='weighted', zero_division=0)\n",
700
+ "accuracy = accuracy_score(all_targets, all_preds)\n",
701
+ "\n",
702
+ "test_output_str = f'Test Loss: {test_loss:.4f}, Precision: {precision:.4f}, Recall: {recall:.4f}, F1: {f1:.4f}, Accuracy: {accuracy:.4f}\\n'\n",
703
+ "print(test_output_str)\n",
704
+ "\n",
705
+ "\n",
706
+ "cm = showConfMatrix(all_targets, all_preds)\n",
707
+ "showClassWiseAcc(cm)"
708
+ ]
709
+ },
710
+ {
711
+ "cell_type": "markdown",
712
+ "id": "e925a5cf-4961-4f8a-b3df-5a2876a19e4f",
713
+ "metadata": {
714
+ "tags": []
715
+ },
716
+ "source": [
717
+ "# Plots and metrics"
718
+ ]
719
+ },
720
+ {
721
+ "cell_type": "markdown",
722
+ "id": "e4c8321e-674a-4328-aedd-d2b6c7b1c0cc",
723
+ "metadata": {
724
+ "tags": []
725
+ },
726
+ "source": [
727
+ "## Plot imports"
728
+ ]
729
+ },
730
+ {
731
+ "cell_type": "code",
732
+ "execution_count": 24,
733
+ "id": "cf291c8d-d70c-4daf-bd74-483a5b071897",
734
+ "metadata": {},
735
+ "outputs": [],
736
+ "source": [
737
+ "from sklearn.metrics import precision_recall_curve\n",
738
+ "from sklearn.preprocessing import label_binarize\n",
739
+ "from sklearn.metrics import roc_curve, auc\n",
740
+ "from itertools import cycle\n",
741
+ "from sklearn.metrics import classification_report"
742
+ ]
743
+ },
744
+ {
745
+ "cell_type": "markdown",
746
+ "id": "7da930eb-005d-4d22-9295-5b3f38143ccd",
747
+ "metadata": {
748
+ "tags": []
749
+ },
750
+ "source": [
751
+ "### Metric-classes"
752
+ ]
753
+ },
754
+ {
755
+ "cell_type": "code",
756
+ "execution_count": 278,
757
+ "id": "58460399-1a01-4353-9198-f142a492d19a",
758
+ "metadata": {},
759
+ "outputs": [],
760
+ "source": [
761
+ "def showConfMatrix(all_targets, all_preds):\n",
762
+ " conf_matrix = confusion_matrix(all_targets, all_preds)\n",
763
+ " # conf_matrix = confusion_matrix(all_preds, all_targets)\n",
764
+ " labels = [\"background\", \"tackle-live\", \"tackle-replay\"]\n",
765
+ " #labels = [\"background\", \"tackle-live\", \"tackle-replay\", \"tackle-live-incomplete\", \"tackle-replay-incomplete\"]\n",
766
+ "\n",
767
+ "\n",
768
+ " sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues', xticklabels=labels, yticklabels=labels)\n",
769
+ " # plt.title('Confusion Matrix')\n",
770
+ " plt.xlabel('Predicted Label')\n",
771
+ " plt.ylabel('True Label')\n",
772
+ " #plt.savefig(f\"{folder}/confusionMatrixSmoteBalance_{epochs_ran}.pdf\", format=\"pdf\", bbox_inches=\"tight\")\n",
773
+ " plt.show()\n",
774
+ " return conf_matrix"
775
+ ]
776
+ },
777
+ {
778
+ "cell_type": "code",
779
+ "execution_count": 279,
780
+ "id": "4f658992-815e-48e0-b2b5-f8d049b306f2",
781
+ "metadata": {},
782
+ "outputs": [],
783
+ "source": [
784
+ "def showClassWiseAcc(conf_matrix):\n",
785
+ " # Calculate accuracy per class\n",
786
+ " class_accuracies = conf_matrix.diagonal() / conf_matrix.sum(axis=1)\n",
787
+ "\n",
788
+ " # Prepare accuracy data for writing to file\n",
789
+ " accuracy_data = \"\\n\".join([f\"Accuracy for class {i}: {class_accuracies[i]:.4f}\" for i in range(len(class_accuracies))])\n",
790
+ "\n",
791
+ " # Print accuracy per class and write to a file\n",
792
+ " print(accuracy_data) # Print to console\n",
793
+ "\n",
794
+ " # Define the filename\n",
795
+ " accuracy_file_path = os.path.join(folder, \"class_accuracies.txt\")"
796
+ ]
797
+ },
798
+ {
799
+ "cell_type": "markdown",
800
+ "id": "dcad796c-110f-49ea-8346-37f065f0da63",
801
+ "metadata": {
802
+ "tags": []
803
+ },
804
+ "source": [
805
+ "## Confusion Matrix"
806
+ ]
807
+ },
808
+ {
809
+ "cell_type": "code",
810
+ "execution_count": 40,
811
+ "id": "619c80b5-7740-48af-b12e-bff5a310475e",
812
+ "metadata": {},
813
+ "outputs": [],
814
+ "source": [
815
+ "cm = showConfMatrix(all_targets, all_preds)"
816
+ ]
817
+ },
818
+ {
819
+ "cell_type": "markdown",
820
+ "id": "45e7bf15-8100-4659-a51e-5a1b3d02f303",
821
+ "metadata": {
822
+ "tags": []
823
+ },
824
+ "source": [
825
+ "## Accuracy per class"
826
+ ]
827
+ },
828
+ {
829
+ "cell_type": "code",
830
+ "execution_count": 545,
831
+ "id": "a81ccac7-98d4-42af-ae0a-26327cdd4483",
832
+ "metadata": {},
833
+ "outputs": [],
834
+ "source": [
835
+ "cm = showConfMatrix(all_targets, all_preds)\n",
836
+ "showClassWiseAcc(cm)\n",
837
+ "labels = [\"background\", \"tackle-live\", \"tackle-replay\"]\n",
838
+ "\n",
839
+ "print(classification_report(all_targets, all_preds, target_names=labels))\n",
840
+ "#torch.save(model.state_dict(), f'{folder}/class_2_74_93_82.pt')\n"
841
+ ]
842
+ },
843
+ {
844
+ "cell_type": "markdown",
845
+ "id": "f9bd689a-50eb-45d0-b150-0cb2c2fad1c1",
846
+ "metadata": {
847
+ "jp-MarkdownHeadingCollapsed": true,
848
+ "tags": []
849
+ },
850
+ "source": [
851
+ "## ROC Curve"
852
+ ]
853
+ },
854
+ {
855
+ "cell_type": "code",
856
+ "execution_count": 74,
857
+ "id": "698d1d4c-d02a-4fd9-899b-42afabccc652",
858
+ "metadata": {},
859
+ "outputs": [],
860
+ "source": [
861
+ "y_score= np.array(all_outputs)\n",
862
+ "fpr = dict()\n",
863
+ "tpr = dict()\n",
864
+ "roc_auc = dict()\n",
865
+ "n_classes = len(labels) \n",
866
+ "\n",
867
+ "y_test_one_hot = np.eye(n_classes)[y_val.cpu()]\n",
868
+ "\n",
869
+ "for i in range(n_classes):\n",
870
+ " fpr[i], tpr[i], _ = roc_curve(y_test_one_hot[:, i], y_score[:, i])\n",
871
+ " roc_auc[i] = auc(fpr[i], tpr[i])\n",
872
+ "\n",
873
+ "# Plot all ROC curves\n",
874
+ "plt.figure()\n",
875
+ "colors = ['blue', 'red', 'green', 'darkorange', 'purple']\n",
876
+ "for i, color in zip(range(n_classes), colors):\n",
877
+ " plt.plot(fpr[i], tpr[i], color=color, lw=2,\n",
878
+ " label='ROC curve of class {0} (area = {1:0.2f})'\n",
879
+ " ''.format(labels[i], roc_auc[i]))\n",
880
+ "\n",
881
+ "plt.plot([0, 1], [0, 1], 'k--', lw=2)\n",
882
+ "plt.xlim([0.0, 1.0])\n",
883
+ "plt.ylim([0.0, 1.05])\n",
884
+ "plt.xlabel('False Positive Rate')\n",
885
+ "plt.ylabel('True Positive Rate')\n",
886
+ "print('Receiver operating characteristic for multi-class')\n",
887
+ "plt.legend(loc=\"lower right\")\n",
888
+ "plt.savefig(f\"{folder}/ROCCurveSmoteBalance_{epochs_ran}.pdf\", format=\"pdf\", bbox_inches=\"tight\")\n",
889
+ "plt.show()"
890
+ ]
891
+ },
892
+ {
893
+ "cell_type": "markdown",
894
+ "id": "3b7c80c8-6fad-4b70-b8e3-b584639fce97",
895
+ "metadata": {
896
+ "tags": []
897
+ },
898
+ "source": [
899
+ "## Multi-Class Precision-Recall Cruve"
900
+ ]
901
+ },
902
+ {
903
+ "cell_type": "code",
904
+ "execution_count": 75,
905
+ "id": "71c9b34d-46be-4be2-b77a-f9734a6f5683",
906
+ "metadata": {},
907
+ "outputs": [],
908
+ "source": [
909
+ "y_test_bin = label_binarize(y_val.cpu(), classes=range(n_classes))\n",
910
+ "\n",
911
+ "precision_recall = {}\n",
912
+ "\n",
913
+ "for i in range(n_classes):\n",
914
+ " precision, recall, _ = precision_recall_curve(y_test_bin[:, i], y_score[:, i])\n",
915
+ " precision_recall[i] = (precision, recall)\n",
916
+ "\n",
917
+ "colors = cycle(['navy', 'turquoise', 'darkorange', 'cornflowerblue', 'teal'])\n",
918
+ "\n",
919
+ "plt.figure(figsize=(6, 4))\n",
920
+ "\n",
921
+ "for i, color in zip(range(n_classes), colors):\n",
922
+ " precision, recall = precision_recall[i]\n",
923
+ " plt.plot(recall, precision, color=color, lw=2, label=f'{labels[i]}')\n",
924
+ "\n",
925
+ "plt.xlabel('Recall')\n",
926
+ "plt.ylabel('Precision')\n",
927
+ "print('Multi-Class Precision-Recall Curve')\n",
928
+ "plt.legend(loc='best')\n",
929
+ "plt.savefig(f\"{folder}/MultiClassPRCurveSmoteBalance_{epochs_ran}.pdf\", format=\"pdf\", bbox_inches=\"tight\")\n",
930
+ "plt.show()"
931
+ ]
932
+ },
933
+ {
934
+ "cell_type": "markdown",
935
+ "id": "bbf3db37-57cd-4e32-80ba-0a29047d2015",
936
+ "metadata": {
937
+ "tags": []
938
+ },
939
+ "source": [
940
+ "# Meta Learner"
941
+ ]
942
+ },
943
+ {
944
+ "cell_type": "markdown",
945
+ "id": "6e9d5d36-1a7d-4b8e-8c16-04aa416ff1a0",
946
+ "metadata": {},
947
+ "source": [
948
+ "By stacking outputs from 3 models, all with one speciality, we train a meta-model/meta-learner by feeding it all 3 models input and the correct label, so it is able to learn where there are strenghts and weaknesses of the other three models. This is done by stacking the base-models outputs to later use as input data for training the meta-learner."
949
+ ]
950
+ },
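+ {
+ "cell_type": "markdown",
+ "id": "0f3a1c2e-aaaa-4e2e-9a3b-000000000006",
+ "metadata": {},
+ "source": [
+ "Concretely, each sample's meta-feature vector is the concatenation of the three base models' 3-class softmax outputs, so the meta-learner sees a 9-dimensional input. A toy sketch with made-up probabilities:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0f3a1c2e-aaaa-4e2e-9a3b-000000000007",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Toy illustration of the stacked meta-features (probabilities are made up)\n",
+ "import torch\n",
+ "\n",
+ "p0 = torch.tensor([[0.7, 0.2, 0.1]]) # background specialist\n",
+ "p1 = torch.tensor([[0.2, 0.6, 0.2]]) # tackle-live specialist\n",
+ "p2 = torch.tensor([[0.1, 0.2, 0.7]]) # tackle-replay specialist\n",
+ "meta_features = torch.cat((p0, p1, p2), dim=1)\n",
+ "print(meta_features.shape) # torch.Size([1, 9])"
+ ]
+ },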
951
+ {
952
+ "cell_type": "code",
953
+ "execution_count": 16,
954
+ "id": "51db8fbd-067f-4f67-8871-591338657714",
955
+ "metadata": {},
956
+ "outputs": [],
957
+ "source": [
958
+ "folder1 = '/home/evan/D1/project/code/smaller_model/raw_training/'"
959
+ ]
960
+ },
961
+ {
962
+ "cell_type": "markdown",
963
+ "id": "ee8bb02e-fb57-416d-b056-021e6e53a843",
964
+ "metadata": {},
965
+ "source": [
966
+ "## Generate base-model outputs"
967
+ ]
968
+ },
969
+ {
970
+ "cell_type": "code",
971
+ "execution_count": 18,
972
+ "id": "6f766c76-c779-4007-b299-324106fae0ce",
973
+ "metadata": {},
974
+ "outputs": [],
975
+ "source": [
976
+ "model0 = BaseModel().to(device)\n",
977
+ "model1 = BaseModel().to(device)\n",
978
+ "model2 = BaseModel().to(device)\n",
979
+ "\n",
980
+ "\n",
981
+ " \n",
982
+ "model0.load_state_dict(torch.load(os.path.join(f'{folder1}', 'class_0/class_0_65_79_71.pt')))\n",
983
+ "model0.to(device)\n",
984
+ "\n",
985
+ "model1.load_state_dict(torch.load(os.path.join(f'{folder1}', 'class_1/class_1_74_93_83.pt')))\n",
986
+ "model1.to(device)\n",
987
+ "\n",
988
+ "model2.load_state_dict(torch.load(os.path.join(f'{folder1}', 'class_2/class_2_84_90_87.pt')))\n",
989
+ "model2.to(device)\n",
990
+ "\n",
991
+ "base_model_outputs = []\n",
992
+ "\n",
993
+ "with torch.no_grad():\n",
994
+ " for X_batch, _ in val_loader:\n",
995
+ " X_batch = X_batch.to(device)\n",
996
+ " # Store the probabilities, not the class predictions\n",
997
+ " probs0 = torch.softmax(model0(X_batch), dim=1)\n",
998
+ " probs1 = torch.softmax(model1(X_batch), dim=1)\n",
999
+ " probs2 = torch.softmax(model2(X_batch), dim=1)\n",
1000
+ " \n",
1001
+ " # Concatenate the model outputs along feature dimension\n",
1002
+ " model_output = torch.cat((probs0, probs1, probs2), dim=1)\n",
1003
+ " \n",
1004
+ " base_model_outputs.append(model_output)\n",
1005
+ "\n",
1006
+ "# Stack all batches to form the complete set of base model outputs\n",
1007
+ "base_model_outputs = torch.cat(base_model_outputs, dim=0)\n"
1008
+ ]
1009
+ },
1010
+ {
1011
+ "cell_type": "markdown",
1012
+ "id": "342dd954-803b-4d4d-bcec-56bdd2fd3166",
1013
+ "metadata": {},
1014
+ "source": [
1015
+ "### Meta-Learner class"
1016
+ ]
1017
+ },
1018
+ {
1019
+ "cell_type": "code",
1020
+ "execution_count": 19,
1021
+ "id": "6ec860ca-ff28-402b-b1c2-263283057b27",
1022
+ "metadata": {},
1023
+ "outputs": [],
1024
+ "source": [
1025
+ "import torch.nn as nn\n",
1026
+ "import torch.nn.init as init\n",
1027
+ "\n",
1028
+ "class MetaModel(nn.Module):\n",
1029
+ " def __init__(self, input_size, num_classes=3):\n",
1030
+ " super(MetaModel, self).__init__()\n",
1031
+ " \n",
1032
+ " self.network = nn.Sequential(\n",
1033
+ " nn.Linear(input_size, 32),\n",
1034
+ " nn.BatchNorm1d(32), \n",
1035
+ " nn.ReLU(),\n",
1036
+ " nn.Dropout(0.3),\n",
1037
+ " \n",
1038
+ " nn.Linear(32, num_classes),\n",
1039
+ " nn.LogSoftmax(dim=1)\n",
1040
+ " )\n",
1041
+ " \n",
1042
+ " # Apply kaiming initialization to all linear layers\n",
1043
+ " self.apply(self.initialize_weights)\n",
1044
+ "\n",
1045
+ " def forward(self, x):\n",
1046
+ " x = self.network(x)\n",
1047
+ " return x\n",
1048
+ "\n",
1049
+ " def initialize_weights(self, m):\n",
1050
+ " if isinstance(m, nn.Linear):\n",
1051
+ " init.kaiming_uniform_(m.weight, nonlinearity='relu')\n",
1052
+ " if m.bias is not None:\n",
1053
+ " init.constant_(m.bias, 0)\n"
1054
+ ]
1055
+ },
1056
+ {
1057
+ "cell_type": "markdown",
1058
+ "id": "580ad04e-8ad0-40a9-b8ce-94006dd7d114",
1059
+ "metadata": {},
1060
+ "source": [
1061
+ "## Split basemodel-outputs to train, val"
1062
+ ]
1063
+ },
1064
+ {
1065
+ "cell_type": "code",
1066
+ "execution_count": 20,
1067
+ "id": "7c935eb9-daba-45e4-807d-17b7e47d57ed",
1068
+ "metadata": {},
1069
+ "outputs": [],
1070
+ "source": [
1071
+ "y_val = torch.cat([y for _, y in val_loader], dim=0) # Just to make sure y_val is y_val, extract from val_loader\n",
1072
+ "\n",
1073
+ "print(np.unique(y_val.cpu().numpy(), return_counts=True))\n",
1074
+ "\n",
1075
+ "X_meta_train, X_meta_val, y_meta_train, y_meta_val = train_test_split(\n",
1076
+ " base_model_outputs.cpu().numpy(), \n",
1077
+ " y_val.cpu().numpy(), \n",
1078
+ " test_size=0.2, \n",
1079
+ " random_state=42\n",
1080
+ ")\n",
1081
+ "\n",
1082
+ "input_size = base_model_outputs.size(1) # Will be 3*num_classes (3 models now) so 9"
1083
+ ]
1084
+ },
1085
+ {
1086
+ "cell_type": "markdown",
1087
+ "id": "79008d19-0814-4b21-beaf-b2851d6d5616",
1088
+ "metadata": {},
1089
+ "source": [
1090
+ "## Create dataloaders"
1091
+ ]
1092
+ },
1093
+ {
1094
+ "cell_type": "code",
1095
+ "execution_count": 21,
1096
+ "id": "772650c2-90ba-40d6-b0e5-1be0c60b664e",
1097
+ "metadata": {},
1098
+ "outputs": [],
1099
+ "source": [
1100
+ "\n",
1101
+ "# Convert numpy arrays back to tensors for training\n",
1102
+ "X_meta_train = torch.tensor(X_meta_train, dtype=torch.float).to(device)\n",
1103
+ "y_meta_train = torch.tensor(y_meta_train, dtype=torch.long).to(device)\n",
1104
+ "X_meta_val = torch.tensor(X_meta_val, dtype=torch.float).to(device)\n",
1105
+ "y_meta_val = torch.tensor(y_meta_val, dtype=torch.long).to(device)\n",
1106
+ "\n",
1107
+ "train_meta_dataset = TensorDataset(X_meta_train, y_meta_train)\n",
1108
+ "train_meta_loader = DataLoader(train_meta_dataset, batch_size=64, shuffle=True)\n",
1109
+ "\n",
1110
+ "val_meta_dataset = TensorDataset(X_meta_val, y_meta_val)\n",
1111
+ "val_meta_loader = DataLoader(val_meta_dataset, batch_size=64, shuffle=False)\n"
1112
+ ]
1113
+ },
1114
+ {
1115
+ "cell_type": "markdown",
1116
+ "id": "7341faac-bfb6-4aff-be2b-2b98a398d55e",
1117
+ "metadata": {},
1118
+ "source": [
1119
+ "## Optimize meta-learner"
1120
+ ]
1121
+ },
1122
+ {
1123
+ "cell_type": "code",
1124
+ "execution_count": 181,
1125
+ "id": "bbb99bb4-875f-4df2-8173-70ad2679f9a8",
1126
+ "metadata": {},
1127
+ "outputs": [],
1128
+ "source": [
1129
+ "import logging\n",
1130
+ "import sys\n",
1131
+ "\n",
1132
+ "SEED = 13\n",
1133
+ "torch.manual_seed(SEED)\n",
1134
+ "\n",
1135
+ "#criterion = FocalLoss(alpha=1, gamma=2, reduction='mean')\n",
1136
+ "\n",
1137
+ "# Convert numpy arrays back to tensors for training\n",
1138
+ "X_meta_train = torch.tensor(X_meta_train, dtype=torch.float).to(device)\n",
1139
+ "y_meta_train = torch.tensor(y_meta_train, dtype=torch.long).to(device)\n",
1140
+ "X_meta_val = torch.tensor(X_meta_val, dtype=torch.float).to(device)\n",
1141
+ "y_meta_val = torch.tensor(y_meta_val, dtype=torch.long).to(device)\n",
1142
+ "\n",
1143
+ "train_meta_dataset = TensorDataset(X_meta_train, y_meta_train)\n",
1144
+ "train_meta_loader = DataLoader(train_meta_dataset, batch_size=64, shuffle=True)\n",
1145
+ "\n",
1146
+ "val_meta_dataset = TensorDataset(X_meta_val, y_meta_val)\n",
1147
+ "val_meta_loader = DataLoader(val_meta_dataset, batch_size=64, shuffle=False)\n",
1148
+ "\n",
1149
+ "\n",
1150
+ "\n",
1151
+ "def objective(trial):\n",
1152
+ " lr = trial.suggest_float('lr', 1e-5, 1e-1, log=True)\n",
1153
+ " weight_decay = trial.suggest_float(\"weight_decay\", 0, 0.1)\n",
1154
+ " lambda_l1 = trial.suggest_float('lambda_l1', 0, 1e-2)\n",
1155
+ " #gamma = trial.suggest_float('gamma', 0, 2)\n",
1156
+ " #alpha = trial.suggest_float('alpha', 0, 1)\n",
1157
+ " \n",
1158
+ " meta_model = MetaModel(input_size=input_size).to(device) \n",
1159
+ " optimizer = torch.optim.Adam(meta_model.parameters(), lr=lr, weight_decay=weight_decay)\n",
1160
+ " criterion = nn.CrossEntropyLoss()\n",
1161
+ "\n",
1162
+ " #model0, model1, model2 = get_models()\n",
1163
+ "\n",
1164
+ " \n",
1165
+ " \n",
1166
+ " epochs = 400\n",
1167
+ " best_val_f1 = 0\n",
1168
+ " epochs_no_improve = 0\n",
1169
+ " early_stop_threshold = 2\n",
1170
+ " \n",
1171
+ " train_losses, val_losses = [], []\n",
1172
+ " #output_file_path = os.path.join(folder, 'training_output.txt')\n",
1173
+ "\n",
1174
+ " for epoch in range(epochs):\n",
1175
+ " model.train()\n",
1176
+ " train_loss = 0\n",
1177
+ " for X_batch, y_batch in train_meta_loader:\n",
1178
+ " \n",
1179
+ " X_batch, y_batch = X_batch.to(device), y_batch.to(device)\n",
1180
+ " optimizer.zero_grad()\n",
1181
+ " \n",
1182
+ " outputs = meta_model(X_batch)\n",
1183
+ " loss = criterion(outputs, y_batch)\n",
1184
+ "\n",
1185
+ " # Calculate L1 regularization penalty to prevent overfitting\n",
1186
+ " l1_penalty = l1_regularization(model, lambda_l1)\n",
1187
+ "\n",
1188
+ " # Add L1 penalty to the loss\n",
1189
+ " loss += l1_penalty\n",
1190
+ "\n",
1191
+ " loss.backward()\n",
1192
+ " optimizer.step()\n",
1193
+ " train_loss += loss.item()\n",
1194
+ " train_losses.append(train_loss / len(train_loader))\n",
1195
+ "\n",
1196
+ " model.eval()\n",
1197
+ " val_loss = 0\n",
1198
+ " all_preds, all_targets, all_outputs = [], [], []\n",
1199
+ " with torch.no_grad():\n",
1200
+ " for X_batch, y_batch in val_meta_loader:\n",
1201
+ " X_batch, y_batch = X_batch.to(device), y_batch.to(device)\n",
1202
+ " outputs = meta_model(X_batch)\n",
1203
+ " loss = criterion(outputs, y_batch)\n",
1204
+ " val_loss += loss.item()\n",
1205
+ " _, predicted = torch.max(outputs.data, 1)\n",
1206
+ " all_preds.extend(predicted.cpu().numpy())\n",
1207
+ " all_targets.extend(y_batch.cpu().numpy())\n",
1208
+ " all_outputs.extend(outputs.cpu().numpy())\n",
1209
+ " val_losses.append(val_loss / len(val_loader))\n",
1210
+ "\n",
1211
+ " precision_0, recall_0, f1_0, _ = precision_recall_fscore_support(all_targets, all_preds, average='weighted', zero_division=0)\n",
1212
+ " accuracy = accuracy_score(all_targets, all_preds)\n",
1213
+ " \n",
1214
+ " if f1_0 > best_val_f1:\n",
1215
+ " best_val_f1 = f1_0\n",
1216
+ " epochs_no_improve = 0\n",
1217
+ " else:\n",
1218
+ " epochs_no_improve += 1\n",
1219
+ "\n",
1220
+ " if epochs_no_improve >= early_stop_threshold:\n",
1221
+ " print(\"Stopping early due to no improvement\")\n",
1222
+ " break\n",
1223
+ " trial.report(f1_0, epoch)\n",
1224
+ " if trial.should_prune():\n",
1225
+ " raise optuna.TrialPruned()\n",
1226
+ " return f1_0\n",
1227
+ "\n",
1228
+ "#optuna.logging.get_logger('optuna').addHandler(logging.StreamHandler(sys.stdout))\n",
1229
+ "study = optuna.create_study(direction='maximize', sampler=optuna.samplers.TPESampler(seed=SEED))\n",
1230
+ "study.optimize(objective, n_trials=150)"
1231
+ ]
1232
+ },
1233
+ {
1234
+ "cell_type": "markdown",
1235
+ "id": "c8bd0c66-1a1f-41aa-80d1-b921f8dfb4c1",
1236
+ "metadata": {},
1237
+ "source": [
1238
+ "## Print optuna-stats"
1239
+ ]
1240
+ },
1241
+ {
1242
+ "cell_type": "code",
1243
+ "execution_count": 182,
1244
+ "id": "0b0a7fd5-d1e0-4466-8a57-f435ba68e1d2",
1245
+ "metadata": {},
1246
+ "outputs": [],
1247
+ "source": [
1248
+ "\n",
1249
+ "# Get the best parameters\n",
1250
+ "best_params = study.best_params\n",
1251
+ "\n",
1252
+ "# Print the best parameters\n",
1253
+ "print(\"Best parameters:\", best_params)\n",
1254
+ "\n",
1255
+ "# Get the best parameters\n",
1256
+ "best_trial = study.best_trial\n",
1257
+ "\n",
1258
+ "# Print the best parameters\n",
1259
+ "print(\"Best parameters:\", best_trial)\n"
1260
+ ]
1261
+ },
1262
+ {
1263
+ "cell_type": "markdown",
1264
+ "id": "c9b28dbc-4266-4485-a814-548ed9e01c3c",
1265
+ "metadata": {},
1266
+ "source": [
1267
+ "### Meta-learner training"
1268
+ ]
1269
+ },
1270
+ {
1271
+ "cell_type": "markdown",
1272
+ "id": "2ad3c476-178b-4ad6-9eba-19a5727ad48b",
1273
+ "metadata": {},
1274
+ "source": [
1275
+ "Found out that cross entropy gives more stable accuracies across classes."
1276
+ ]
1277
+ },
1278
+ {
1279
+ "cell_type": "code",
1280
+ "execution_count": 26,
1281
+ "id": "4eb430e4-1dee-455f-9279-7e71ac9005cb",
1282
+ "metadata": {},
1283
+ "outputs": [],
1284
+ "source": [
1285
+ "y_val = torch.cat([y for _, y in val_loader], dim=0) # Just to make sure y_val is y_val, extract from val_loader\n",
1286
+ "\n",
1287
+ "print(np.unique(y_val.cpu().numpy(), return_counts=True))\n",
1288
+ "# Split the data into train, validation, and test sets\n",
1289
+ "X_meta_train, X_meta_temp, y_meta_train, y_meta_temp = train_test_split(\n",
1290
+ " base_model_outputs.cpu().numpy(), \n",
1291
+ " y_val.cpu().numpy(), \n",
1292
+ " test_size=0.4, \n",
1293
+ " random_state=42\n",
1294
+ ")\n",
1295
+ "\n",
1296
+ "X_meta_val, X_meta_test, y_meta_val, y_meta_test = train_test_split(\n",
1297
+ " X_meta_temp, \n",
1298
+ " y_meta_temp, \n",
1299
+ " test_size=0.5, \n",
1300
+ " random_state=42\n",
1301
+ ")\n",
1302
+ "\n",
1303
+ "input_size = base_model_outputs.size(1) # Will be 3*num_classes (3 models now) so 9\n",
1304
+ "\n",
1305
+ "config = {\n",
1306
+ " 'lr': 0.007728103291008411, \n",
1307
+ " 'weight_decay': 0.003503652410143732, \n",
1308
+ " 'lambda_l1': 0.002984494708891794\n",
1309
+ "}\n",
1310
+ "\n",
1311
+ "meta_model = MetaModel(input_size=input_size).to(device) \n",
1312
+ "optimizer = torch.optim.Adam(meta_model.parameters(), lr=config['lr'], weight_decay=config['weight_decay'])\n",
1313
+ "lambda_l1 = config['lambda_l1']\n",
1314
+ "\n",
1315
+ "criterion = nn.CrossEntropyLoss()\n",
1316
+ "\n",
1317
+ "# Convert numpy arrays back to tensors for training\n",
1318
+ "X_meta_train = torch.tensor(X_meta_train, dtype=torch.float).to(device)\n",
1319
+ "y_meta_train = torch.tensor(y_meta_train, dtype=torch.long).to(device)\n",
1320
+ "X_meta_val = torch.tensor(X_meta_val, dtype=torch.float).to(device)\n",
1321
+ "y_meta_val = torch.tensor(y_meta_val, dtype=torch.long).to(device)\n",
1322
+ "X_meta_test = torch.tensor(X_meta_test, dtype=torch.float).to(device)\n",
1323
+ "y_meta_test = torch.tensor(y_meta_test, dtype=torch.long).to(device)\n",
1324
+ "\n",
1325
+ "all_preds, all_targets, all_outputs = [], [], []\n",
1326
+ "train_losses, val_losses = [], []\n",
1327
+ "\n",
1328
+ "best_val_f1 = 0\n",
1329
+ "epochs_no_improve = 0\n",
1330
+ "early_stop_threshold = 2\n",
1331
+ "\n",
1332
+ "best_f1 = 0.0\n",
1333
+ "\n",
1334
+ "# Training loop for the meta-model\n",
1335
+ "epochs = 80\n",
1336
+ "\n",
1337
+ "for epoch in range(epochs):\n",
1338
+ " meta_model.train()\n",
1339
+ " train_loss = 0\n",
1340
+ " for X_batch, y_batch in train_meta_loader:\n",
1341
+ " X_batch, y_batch = X_batch.to(device), y_batch.to(device)\n",
1342
+ " optimizer.zero_grad()\n",
1343
+ " outputs = meta_model(X_batch)\n",
1344
+ " loss = criterion(outputs, y_batch)\n",
1345
+ " l1_penalty = l1_regularization(meta_model, lambda_l1)\n",
1346
+ " loss += l1_penalty\n",
1347
+ " loss.backward()\n",
1348
+ " optimizer.step()\n",
1349
+ " train_loss += loss.item()\n",
1350
+ " train_losses.append(train_loss / len(train_meta_loader))\n",
1351
+ "\n",
1352
+ " meta_model.eval()\n",
1353
+ " val_loss = 0\n",
1354
+ " all_preds, all_targets, all_outputs = [], [], []\n",
1355
+ " with torch.no_grad():\n",
1356
+ " for X_batch, y_batch in val_meta_loader:\n",
1357
+ " X_batch, y_batch = X_batch.to(device), y_batch.to(device)\n",
1358
+ " outputs = meta_model(X_batch)\n",
1359
+ " loss = criterion(outputs, y_batch)\n",
1360
+ " val_loss += loss.item()\n",
1361
+ " _, predicted = torch.max(outputs.data, 1)\n",
1362
+ " all_preds.extend(predicted.cpu().numpy())\n",
1363
+ " all_targets.extend(y_batch.cpu().numpy())\n",
1364
+ " all_outputs.extend(outputs.cpu().numpy())\n",
1365
+ " val_losses.append(val_loss / len(val_meta_loader))\n",
1366
+ "\n",
1367
+ " precision_0, recall_0, f1_0, _ = precision_recall_fscore_support(all_targets, all_preds, average='weighted', zero_division=0)\n",
1368
+ " accuracy = accuracy_score(all_targets, all_preds)\n",
1369
+ "\n",
1370
+ " output_str = f'Epoch {epoch+1}: Train Loss: {train_losses[-1]:.4f}, Val Loss: {val_losses[-1]:.4f}, Precision: {precision_0:.4f}, Recall: {recall_0:.4f}, F1: {f1_0:.4f}, Accuracy: {accuracy:.4f}\\n'\n",
1371
+ " print(output_str)\n",
1372
+ "\n",
1373
+ " if f1_0 > best_f1:\n",
1374
+ " best_f1 = f1_0\n",
1375
+ " best_epoch = epoch\n",
1376
+ " best_model_state_dict = meta_model.state_dict()\n",
1377
+ " best_all_targets = all_targets\n",
1378
+ " best_all_preds = all_preds\n",
1379
+ " epochs_no_improve = 0\n",
1380
+ " else:\n",
1381
+ " epochs_no_improve += 1\n",
1382
+ "\n",
1383
+ " if epochs_no_improve >= early_stop_threshold:\n",
1384
+ " print(\"Stopping early due to no improvement\")\n",
1385
+ " break\n",
1386
+ "\n",
1387
+ "meta_model.load_state_dict(best_model_state_dict)\n"
1388
+ ]
1389
+ },
1390
+ {
1391
+ "cell_type": "markdown",
1392
+ "id": "041c315b-068b-4ebb-94d7-629b75450d71",
1393
+ "metadata": {},
1394
+ "source": [
1395
+ "## Inference on test set"
1396
+ ]
1397
+ },
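+ {
+ "cell_type": "markdown",
+ "id": "0f3a1c2e-aaaa-4e2e-9a3b-000000000008",
+ "metadata": {},
+ "source": [
+ "The evaluation below iterates over `test_meta_loader`, which is never constructed earlier in the notebook. The next cell is a minimal sketch that builds it, assuming the `X_meta_test`/`y_meta_test` tensors produced by the train/val/test split in the training cell above."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0f3a1c2e-aaaa-4e2e-9a3b-000000000009",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from torch.utils.data import TensorDataset, DataLoader\n",
+ "\n",
+ "# Build the held-out meta test loader (assumes X_meta_test / y_meta_test exist from the split above)\n",
+ "test_meta_dataset = TensorDataset(X_meta_test, y_meta_test)\n",
+ "test_meta_loader = DataLoader(test_meta_dataset, batch_size=64, shuffle=False)"
+ ]
+ },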
1398
+ {
1399
+ "cell_type": "code",
1400
+ "execution_count": null,
1401
+ "id": "6384288c-63d7-4bca-88fb-5bca1df47d7c",
1402
+ "metadata": {},
1403
+ "outputs": [],
1404
+ "source": [
1405
+ "# Evaluate on the test set\n",
1406
+ "meta_model.eval()\n",
1407
+ "test_loss = 0\n",
1408
+ "all_test_preds, all_test_targets = [], []\n",
1409
+ "with torch.no_grad():\n",
1410
+ " for X_batch, y_batch in test_meta_loader:\n",
1411
+ " X_batch, y_batch = X_batch.to(device), y_batch.to(device)\n",
1412
+ " outputs = meta_model(X_batch)\n",
1413
+ " loss = criterion(outputs, y_batch)\n",
1414
+ " test_loss += loss.item()\n",
1415
+ " _, predicted = torch.max(outputs.data, 1)\n",
1416
+ " all_test_preds.extend(predicted.cpu().numpy())\n",
1417
+ " all_test_targets.extend(y_batch.cpu().numpy())\n",
1418
+ "\n",
1419
+ "test_loss /= len(test_meta_loader)\n",
1420
+ "precision_test, recall_test, f1_test, _ = precision_recall_fscore_support(all_test_targets, all_test_preds, average='weighted', zero_division=0)\n",
1421
+ "accuracy_test = accuracy_score(all_test_targets, all_test_preds)\n",
1422
+ "\n",
1423
+ "print(f'Test Loss: {test_loss:.4f}, Test Precision: {precision_test:.4f}, Test Recall: {recall_test:.4f}, Test F1: {f1_test:.4f}, Test Accuracy: {accuracy_test:.4f}')"
1424
+ ]
1425
+ },
1426
+ {
1427
+ "cell_type": "markdown",
1428
+ "id": "a17da765-d795-49f3-abbb-06850b7f9264",
1429
+ "metadata": {
1430
+ "tags": []
1431
+ },
1432
+ "source": [
1433
+ "### Conf-matrix and class-wise acc for meta-learner"
1434
+ ]
1435
+ },
1436
+ {
1437
+ "cell_type": "code",
1438
+ "execution_count": 27,
1439
+ "id": "eb41e5f6-d038-4696-ae55-3d2e3ebe7c2e",
1440
+ "metadata": {},
1441
+ "outputs": [],
1442
+ "source": [
1443
+ "conf_matrix = confusion_matrix(all_targets, all_preds)\n",
1444
+ "# conf_matrix = confusion_matrix(all_preds, all_targets)\n",
1445
+ "labels = [\"background\", \"tackle-live\", \"tackle-replay\"]\n",
1446
+ "#labels = [\"background\", \"tackle-live\", \"tackle-replay\", \"tackle-live-incomplete\", \"tackle-replay-incomplete\"]\n",
1447
+ "\n",
1448
+ "\n",
1449
+ "sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues', xticklabels=labels, yticklabels=labels)\n",
1450
+ "# plt.title('Confusion Matrix')\n",
1451
+ "plt.xlabel('Predicted Label')\n",
1452
+ "plt.ylabel('True Label')\n",
1453
+ "#plt.savefig(f\"{folder1}/meta/MetaModel_stretched.pdf\", format=\"pdf\", bbox_inches=\"tight\")\n",
1454
+ "plt.show()\n",
1455
+ "\n",
1456
+ "# Calculate accuracy per class\n",
1457
+ "class_accuracies = conf_matrix.diagonal() / conf_matrix.sum(axis=1)\n",
1458
+ "\n",
1459
+ "# Prepare accuracy data for writing to file\n",
1460
+ "accuracy_data = \"\\n\".join([f\"Accuracy for class {i}: {class_accuracies[i]:.4f}\" for i in range(len(class_accuracies))])\n",
1461
+ "print(classification_report(all_targets, all_preds, target_names=labels))\n",
1462
+ "\n",
1463
+ "\n",
1464
+ "# Print accuracy per class and write to a file\n",
1465
+ "print(accuracy_data) # Print to console\n",
1466
+ "\n",
1467
+ "#torch.save(meta_model.state_dict(), f'{folder1}/meta/meta_model.pt')\n",
1468
+ "\n",
1469
+ "# Define the filename\n",
1470
+ "#accuracy_file_path = os.path.join(folder, \"class_accuracies.txt\")\n",
1471
+ "\n",
1472
+ "# Write accuracies to a file in the specified folder\n",
1473
+ "#with open(accuracy_file_path, 'w') as f:\n",
1474
+ "# f.write(f\"Samples: {len(all_preds)}\\n\") # Write the number of samples\n",
1475
+ "# f.write(accuracy_data) # Write the accuracy data"
1476
+ ]
1477
+ },
1478
+ {
1479
+ "cell_type": "markdown",
1480
+ "id": "67a4e623-b813-4d1c-bcc5-45cca41140f9",
1481
+ "metadata": {},
1482
+ "source": [
1483
+ "## Roc curve"
1484
+ ]
1485
+ },
1486
+ {
1487
+ "cell_type": "code",
1488
+ "execution_count": 64,
1489
+ "id": "894c35a1-a52f-438b-93ce-044429bdaf2f",
1490
+ "metadata": {},
1491
+ "outputs": [],
1492
+ "source": [
1493
+ "import numpy as np\n",
1494
+ "from sklearn.metrics import roc_curve, auc\n",
1495
+ "from sklearn.preprocessing import label_binarize\n",
1496
+ "import matplotlib.pyplot as plt\n",
1497
+ "\n",
1498
+ "# Class names and count\n",
1499
+ "class_names = ['background', 'tackle-live', 'tackle-replay']\n",
1500
+ "n_classes = len(class_names)\n",
1501
+ "\n",
1502
+ "# Binarize the targets and predictions for ROC curve computation\n",
1503
+ "test_targets_bin = label_binarize(targets, classes=[0, 1, 2])\n",
1504
+ "test_predictions_bin = label_binarize(predictions, classes=[0, 1, 2])\n",
1505
+ "\n",
1506
+ "# ROC curve and AUC for each class\n",
1507
+ "fpr = {}\n",
1508
+ "tpr = {}\n",
1509
+ "roc_auc = {}\n",
1510
+ "\n",
1511
+ "for i in range(n_classes):\n",
1512
+ " fpr[i], tpr[i], _ = roc_curve(test_targets_bin[:, i], test_predictions_bin[:, i])\n",
1513
+ " roc_auc[i] = auc(fpr[i], tpr[i])\n",
1514
+ "\n",
1515
+ "# Plot ROC curves for each class\n",
1516
+ "plt.figure(figsize=(8, 6))\n",
1517
+ "for i in range(n_classes):\n",
1518
+ " plt.plot(fpr[i], tpr[i], label=f'{class_names[i]} (AUC = {roc_auc[i]:.2f})')\n",
1519
+ "plt.plot([0, 1], [0, 1], 'k--')\n",
1520
+ "plt.xlim([0.0, 1.0])\n",
1521
+ "plt.grid(visible=True)\n",
1522
+ "plt.ylim([0.0, 1.05])\n",
1523
+ "plt.xlabel('False Positive Rate')\n",
1524
+ "plt.ylabel('True Positive Rate')\n",
1525
+ "plt.title('Multi-class ROC Curve')\n",
1526
+ "plt.legend(loc='lower right')\n",
1527
+ "plt.savefig(\"baseline-roc.pdf\", format=\"pdf\", bbox_inches=\"tight\")\n",
1528
+ "plt.show()"
1529
+ ]
1530
+ },
1531
+ {
1532
+ "cell_type": "markdown",
1533
+ "id": "580a616d-5b01-4725-9b5b-71c71537bf22",
1534
+ "metadata": {},
1535
+ "source": [
1536
+ "## Check model agreement"
1537
+ ]
1538
+ },
1539
+ {
1540
+ "cell_type": "code",
1541
+ "execution_count": 241,
1542
+ "id": "bbe695f7-458d-4559-bc58-0b14dba6785d",
1543
+ "metadata": {},
1544
+ "outputs": [],
1545
+ "source": [
1546
+ "import torch\n",
1547
+ "from sklearn.metrics import precision_recall_fscore_support, accuracy_score\n",
1548
+ "\n",
1549
+ "def evaluate_model_agreement(meta_model, base_models, val_loader, device, criterion):\n",
1550
+ " meta_model.eval()\n",
1551
+ " agreement_counts = [0] * len(base_models) # Agreement count for each base model\n",
1552
+ " total_predictions = 0 # Total predictions made\n",
1553
+ "\n",
1554
+ " all_preds = []\n",
1555
+ " all_targets = []\n",
1556
+ "\n",
1557
+ " with torch.no_grad():\n",
1558
+ " for X_batch, y_meta_val in val_loader:\n",
1559
+ " X_batch = X_batch.to(device)\n",
1560
+ " y_meta_val = y_meta_val.to(device)\n",
1561
+ "\n",
1562
+ " # Get predictions from each base model and the MetaModel\n",
1563
+ " base_probs = [torch.softmax(model(X_batch), dim=1) for model in base_models]\n",
1564
+ " base_predictions = [torch.max(probs, 1)[1] for probs in base_probs]\n",
1565
+ "\n",
1566
+ " # Concatenate the model outputs along feature dimension for MetaModel input\n",
1567
+ " meta_input = torch.cat(base_probs, dim=1)\n",
1568
+ " meta_outputs = meta_model(meta_input)\n",
1569
+ " meta_predictions = torch.max(meta_outputs, 1)[1]\n",
1570
+ "\n",
1571
+ " # Compare MetaModel predictions with each base model's predictions\n",
1572
+ " for i, base_preds in enumerate(base_predictions):\n",
1573
+ " agreement_counts[i] += (base_preds == meta_predictions).sum().item()\n",
1574
+ "\n",
1575
+ " total_predictions += y_meta_val.size(0)\n",
1576
+ " \n",
1577
+ " # Collect predictions for evaluation\n",
1578
+ " all_preds.extend(meta_predictions.cpu().numpy())\n",
1579
+ " all_targets.extend(y_meta_val.cpu().numpy())\n",
1580
+ "\n",
1581
+ " # Compute loss (optional)\n",
1582
+ " val_loss = criterion(meta_outputs, y_meta_val)\n",
1583
+ "\n",
1584
+ " # Calculate precision, recall, f1-score, and accuracy\n",
1585
+ " precision, recall, f1, _ = precision_recall_fscore_support(all_targets, all_preds, average='weighted', zero_division=0)\n",
1586
+ " accuracy = accuracy_score(all_targets, all_preds)\n",
1587
+ "\n",
1588
+ " # Calculate agreement percentages\n",
1589
+ " agreement_percentages = [count / total_predictions * 100 for count in agreement_counts]\n",
1590
+ "\n",
1591
+ " print(f\"Loss: {val_loss.item()}\")\n",
1592
+ " print(f'Precision: {precision:.4f}, Recall: {recall:.4f}, F1: {f1:.4f}, Accuracy: {accuracy:.4f}')\n",
1593
+ " return agreement_percentages\n",
1594
+ "\n",
1595
+ "# Usage\n",
1596
+ "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
1597
+ "#meta_model = best_meta_model # Your loaded MetaModel\n",
1598
+ "base_models = [model0, model1, model2] # List of your base models\n",
1599
+ "criterion = torch.nn.CrossEntropyLoss() # Your loss function\n",
1600
+ "\n",
1601
+ "agreement_percentages = evaluate_model_agreement(meta_model, base_models, val_loader, device, criterion)\n",
1602
+ "for i, pct in enumerate(agreement_percentages):\n",
1603
+ " print(f\"Model {i} Agreement Percentage: {pct:.2f}%\")\n",
1604
+ "\n",
1605
+ " \n",
1606
+ "print(classification_report(all_targets, all_preds, target_names=labels))\n",
1607
+ " "
1608
+ ]
1609
+ },
1610
+ {
1611
+ "cell_type": "markdown",
1612
+ "id": "b4a9897e-6a1c-4f13-8789-435ba42308c9",
1613
+ "metadata": {},
1614
+ "source": [
1615
+ "## Inference"
1616
+ ]
1617
+ },
1618
+ {
1619
+ "cell_type": "code",
1620
+ "execution_count": 28,
1621
+ "id": "80e2b209-0a62-45dd-a7c5-a655d3653648",
1622
+ "metadata": {},
1623
+ "outputs": [],
1624
+ "source": [
1625
+ "all_preds, all_targets, all_outputs = [], [], []\n",
1626
+ "import time\n",
1627
+ "\n",
1628
+ "batch_times = []\n",
1629
+ "\n",
1630
+ "with torch.no_grad():\n",
1631
+ " start_time = time.time()\n",
1632
+ " \n",
1633
+ " for batch_idx, (X_batch, y_meta_val) in enumerate(test_loader):\n",
1634
+ " X_batch = X_batch.to(device)\n",
1635
+ " # Store the probabilities, not the class predictions\n",
1636
+ " probs0 = torch.softmax(model0(X_batch), dim=1)\n",
1637
+ " probs1 = torch.softmax(model1(X_batch), dim=1)\n",
1638
+ " probs2 = torch.softmax(model2(X_batch), dim=1)\n",
1639
+ " \n",
1640
+ " # Concatenate the model outputs along feature dimension\n",
1641
+ " model_output = torch.cat((probs0, probs1, probs2), dim=1)\n",
1642
+ " \n",
1643
+ " val_outputs = meta_model(model_output)\n",
1644
+ " \n",
1645
+ "\n",
1646
+ " val_loss = criterion(val_outputs, y_meta_val)\n",
1647
+ "\n",
1648
+ " _, predicted = torch.max(val_outputs.data, 1)\n",
1649
+ " all_preds.extend(predicted.cpu().numpy())\n",
1650
+ " all_targets.extend(y_meta_val.cpu().numpy())\n",
1651
+ " all_outputs.extend(val_outputs.cpu().numpy())\n",
1652
+ " batch_time = time.time() - start_time\n",
1653
+ " batch_times.append(batch_time)\n",
1654
+ " print(f\"Batch {batch_idx + 1}: Time = {batch_time:.7f} seconds\")\n",
1655
+ "\n",
1656
+ " precision, recall, f1, _ = precision_recall_fscore_support(all_targets, all_preds, average='weighted', zero_division=0)\n",
1657
+ " accuracy = accuracy_score(all_targets, all_preds)\n",
1658
+ " \n",
1659
+ " print(f\"Loss: {loss.item()}, Val Loss: {val_loss.item()}\")\n",
1660
+ " \n",
1661
+ " print(f'Precision: {precision:.4f}, Recall: {recall:.4f}, F1: {f1:.4f}, Accuracy: {accuracy:.4f}')\n",
1662
+ "\n",
1663
+ "average_batch_time = sum(batch_times) / len(batch_times)\n",
1664
+ "\n",
1665
+ "print(f\"Average Batch Time: {average_batch_time:.7f} seconds\")\n",
1666
+ "\n",
1667
+ "#torch.save(meta_model.state_dict(), '/home/evan/D1/project/code/meta_model/meta_model_3')"
1668
+ ]
1669
+ }
1670
+ ],
1671
+ "metadata": {
1672
+ "kernelspec": {
1673
+ "display_name": "Python (evan31818)",
1674
+ "language": "python",
1675
+ "name": "evan31818"
1676
+ },
1677
+ "language_info": {
1678
+ "codemirror_mode": {
1679
+ "name": "ipython",
1680
+ "version": 3
1681
+ },
1682
+ "file_extension": ".py",
1683
+ "mimetype": "text/x-python",
1684
+ "name": "python",
1685
+ "nbconvert_exporter": "python",
1686
+ "pygments_lexer": "ipython3",
1687
+ "version": "3.8.19"
1688
+ }
1689
+ },
1690
+ "nbformat": 4,
1691
+ "nbformat_minor": 5
1692
+ }