suryadev1 commited on
Commit
a326aed
·
1 Parent(s): f9c5a79

Added the high and low ROC values

Browse files
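In short, this commit splits the sampled test instances by school graduation rate and reports a separate ROC-AUC for the high-rate (HGR) and low-rate (LGR) groups. A minimal sketch of the added computation, mirroring the app.py diff below; in the real pipeline the labels come from roc_data2.pkl (written by new_test_saved_finetuned_model.py) and the group tags come from matching school IDs, so hypothetical stand-ins are used here to keep the sketch runnable:

from sklearn.metrics import roc_auc_score

# Hypothetical stand-ins for graduation_groups, t_label, p_label
graduation_groups = ['high', 'low', 'high', 'low', 'high']
t_label = [1, 0, 0, 1, 0]
p_label = [0.8, 0.3, 0.4, 0.7, 0.2]

# Align group tag, true label, and predicted probability per instance
aligned = list(zip(graduation_groups, t_label, p_label))
high_t = [t for g, t, _ in aligned if g == 'high']
high_p = [p for g, _, p in aligned if g == 'high']
low_t = [t for g, t, _ in aligned if g == 'low']
low_p = [p for g, _, p in aligned if g == 'low']

# ROC-AUC is undefined for a single-class group, hence the guards
high_roc_auc = roc_auc_score(high_t, high_p) if len(set(high_t)) > 1 else None
low_roc_auc = roc_auc_score(low_t, low_p) if len(set(low_t)) > 1 else None
print(high_roc_auc, low_roc_auc)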
.ipynb_checkpoints/Untitled-checkpoint.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
.ipynb_checkpoints/distinguish_high_low_label-checkpoint.ipynb ADDED
@@ -0,0 +1,447 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 3,
6
+ "id": "960bac80-51c7-4e9f-ad2d-84cd6c710f98",
7
+ "metadata": {},
8
+ "outputs": [],
9
+ "source": [
10
+ "import pickle\n",
11
+ "import pandas as pd"
12
+ ]
13
+ },
14
+ {
15
+ "cell_type": "code",
16
+ "execution_count": 4,
17
+ "id": "a34f21d0-0854-4a54-8f93-67718b2f969e",
18
+ "metadata": {},
19
+ "outputs": [],
20
+ "source": [
21
+ "file_path = \"roc_data2.pkl\"\n",
22
+ "\n",
23
+ "# Open and load the pickle file\n",
24
+ "with open(file_path, 'rb') as file:\n",
25
+ " data = pickle.load(file)\n",
26
+ "\n",
27
+ "\n",
28
+ "# Print or use the data\n",
29
+ "# data[2]"
30
+ ]
31
+ },
32
+ {
33
+ "cell_type": "code",
34
+ "execution_count": 5,
35
+ "id": "f9febed4-ce50-4e30-96ea-4b538ce2f9a1",
36
+ "metadata": {},
37
+ "outputs": [],
38
+ "source": [
39
+ "inc_slider=1\n",
40
+ "parent_location=\"ratio_proportion_change3_2223/sch_largest_100-coded/finetuning/\"\n",
41
+ "test_info_location=parent_location+\"fullTest/test_info.txt\"\n",
42
+ "test_location=parent_location+\"fullTest/test.txt\"\n",
43
+ "test_info = pd.read_csv(test_info_location, sep=',', header=None, engine='python')\n",
44
+ "grad_rate_data = pd.DataFrame(pd.read_pickle('school_grduation_rate.pkl'),columns=['school_number','grad_rate']) # Load the grad_rate data\n",
45
+ "\n",
46
+ "# Step 1: Extract unique school numbers from test_info\n",
47
+ "unique_schools = test_info[0].unique()\n",
48
+ "\n",
49
+ "# Step 2: Filter the grad_rate_data using the unique school numbers\n",
50
+ "schools = grad_rate_data[grad_rate_data['school_number'].isin(unique_schools)]\n",
51
+ "\n",
52
+ "# Define a threshold for high and low graduation rates (adjust as needed)\n",
53
+ "grad_rate_threshold = 0.9 \n",
54
+ "\n",
55
+ "# Step 4: Divide schools into high and low graduation rate groups\n",
56
+ "high_grad_schools = schools[schools['grad_rate'] >= grad_rate_threshold]['school_number'].unique()\n",
57
+ "low_grad_schools = schools[schools['grad_rate'] < grad_rate_threshold]['school_number'].unique()\n",
58
+ "\n",
59
+ "# Step 5: Sample percentage of schools from each group\n",
60
+ "high_sample = pd.Series(high_grad_schools).sample(frac=inc_slider/100, random_state=1).tolist()\n",
61
+ "low_sample = pd.Series(low_grad_schools).sample(frac=inc_slider/100, random_state=1).tolist()\n",
62
+ "\n",
63
+ "# Step 6: Combine the sampled schools\n",
64
+ "random_schools = high_sample + low_sample\n",
65
+ "\n",
66
+ "# Step 7: Get indices for the sampled schools\n",
67
+ "indices = test_info[test_info[0].isin(random_schools)].index.tolist()\n",
68
+ "\n"
69
+ ]
70
+ },
71
+ {
72
+ "cell_type": "code",
73
+ "execution_count": 6,
74
+ "id": "fdfdf4b6-2752-4a21-9880-869af69f20cf",
75
+ "metadata": {},
76
+ "outputs": [],
77
+ "source": [
78
+ "high_indices = test_info[(test_info[0].isin(high_sample))].index.tolist()\n",
79
+ "low_indices = test_info[(test_info[0].isin(low_sample))].index.tolist()"
80
+ ]
81
+ },
82
+ {
83
+ "cell_type": "code",
84
+ "execution_count": 7,
85
+ "id": "a79a4598-5702-4cc8-9f07-8e18fdda648b",
86
+ "metadata": {},
87
+ "outputs": [
88
+ {
89
+ "data": {
90
+ "text/plain": [
91
+ "997"
92
+ ]
93
+ },
94
+ "execution_count": 7,
95
+ "metadata": {},
96
+ "output_type": "execute_result"
97
+ }
98
+ ],
99
+ "source": [
100
+ "len(high_indices)+len(low_indices)\n"
101
+ ]
102
+ },
103
+ {
104
+ "cell_type": "code",
105
+ "execution_count": 8,
106
+ "id": "4707f3e6-2f44-46d8-ad8c-b6c244f693af",
107
+ "metadata": {},
108
+ "outputs": [
109
+ {
110
+ "data": {
111
+ "text/html": [
112
+ "<div>\n",
113
+ "<style scoped>\n",
114
+ " .dataframe tbody tr th:only-of-type {\n",
115
+ " vertical-align: middle;\n",
116
+ " }\n",
117
+ "\n",
118
+ " .dataframe tbody tr th {\n",
119
+ " vertical-align: top;\n",
120
+ " }\n",
121
+ "\n",
122
+ " .dataframe thead th {\n",
123
+ " text-align: right;\n",
124
+ " }\n",
125
+ "</style>\n",
126
+ "<table border=\"1\" class=\"dataframe\">\n",
127
+ " <thead>\n",
128
+ " <tr style=\"text-align: right;\">\n",
129
+ " <th></th>\n",
130
+ " <th>0</th>\n",
131
+ " </tr>\n",
132
+ " </thead>\n",
133
+ " <tbody>\n",
134
+ " <tr>\n",
135
+ " <th>5342</th>\n",
136
+ " <td>PercentChange-0\\tNumeratorQuantity1-0\\tNumerat...</td>\n",
137
+ " </tr>\n",
138
+ " <tr>\n",
139
+ " <th>5343</th>\n",
140
+ " <td>PercentChange-0\\tNumeratorQuantity2-0\\tNumerat...</td>\n",
141
+ " </tr>\n",
142
+ " <tr>\n",
143
+ " <th>5344</th>\n",
144
+ " <td>PercentChange-0\\tNumeratorQuantity2-0\\tNumerat...</td>\n",
145
+ " </tr>\n",
146
+ " <tr>\n",
147
+ " <th>5345</th>\n",
148
+ " <td>PercentChange-0\\tNumeratorQuantity2-2\\tNumerat...</td>\n",
149
+ " </tr>\n",
150
+ " <tr>\n",
151
+ " <th>5346</th>\n",
152
+ " <td>PercentChange-0\\tNumeratorQuantity2-0\\tDenomin...</td>\n",
153
+ " </tr>\n",
154
+ " <tr>\n",
155
+ " <th>...</th>\n",
156
+ " <td>...</td>\n",
157
+ " </tr>\n",
158
+ " <tr>\n",
159
+ " <th>113359</th>\n",
160
+ " <td>PercentChange-0\\tNumeratorQuantity2-2\\tNumerat...</td>\n",
161
+ " </tr>\n",
162
+ " <tr>\n",
163
+ " <th>113360</th>\n",
164
+ " <td>PercentChange-0\\tNumeratorQuantity2-0\\tNumerat...</td>\n",
165
+ " </tr>\n",
166
+ " <tr>\n",
167
+ " <th>113361</th>\n",
168
+ " <td>PercentChange-0\\tNumeratorQuantity2-0\\tNumerat...</td>\n",
169
+ " </tr>\n",
170
+ " <tr>\n",
171
+ " <th>113362</th>\n",
172
+ " <td>PercentChange-0\\tNumeratorQuantity2-0\\tNumerat...</td>\n",
173
+ " </tr>\n",
174
+ " <tr>\n",
175
+ " <th>113363</th>\n",
176
+ " <td>PercentChange-0\\tNumeratorQuantity2-0\\tNumerat...</td>\n",
177
+ " </tr>\n",
178
+ " </tbody>\n",
179
+ "</table>\n",
180
+ "<p>997 rows × 1 columns</p>\n",
181
+ "</div>"
182
+ ],
183
+ "text/plain": [
184
+ " 0\n",
185
+ "5342 PercentChange-0\\tNumeratorQuantity1-0\\tNumerat...\n",
186
+ "5343 PercentChange-0\\tNumeratorQuantity2-0\\tNumerat...\n",
187
+ "5344 PercentChange-0\\tNumeratorQuantity2-0\\tNumerat...\n",
188
+ "5345 PercentChange-0\\tNumeratorQuantity2-2\\tNumerat...\n",
189
+ "5346 PercentChange-0\\tNumeratorQuantity2-0\\tDenomin...\n",
190
+ "... ...\n",
191
+ "113359 PercentChange-0\\tNumeratorQuantity2-2\\tNumerat...\n",
192
+ "113360 PercentChange-0\\tNumeratorQuantity2-0\\tNumerat...\n",
193
+ "113361 PercentChange-0\\tNumeratorQuantity2-0\\tNumerat...\n",
194
+ "113362 PercentChange-0\\tNumeratorQuantity2-0\\tNumerat...\n",
195
+ "113363 PercentChange-0\\tNumeratorQuantity2-0\\tNumerat...\n",
196
+ "\n",
197
+ "[997 rows x 1 columns]"
198
+ ]
199
+ },
200
+ "execution_count": 8,
201
+ "metadata": {},
202
+ "output_type": "execute_result"
203
+ }
204
+ ],
205
+ "source": [
206
+ "# Load the test file and select rows based on indices\n",
207
+ "test = pd.read_csv(test_location, sep=',', header=None, engine='python')\n",
208
+ "selected_rows_df2 = test.loc[indices]\n",
209
+ "selected_rows_df2"
210
+ ]
211
+ },
212
+ {
213
+ "cell_type": "code",
214
+ "execution_count": 11,
215
+ "id": "1d0c3d49-061f-486b-9c19-cf20945f3207",
216
+ "metadata": {},
217
+ "outputs": [],
218
+ "source": [
219
+ "graduation_groups = [\n",
220
+ " 'high' if idx in high_indices else 'low' for idx in selected_rows_df2.index\n",
221
+ "]\n",
222
+ "# graduation_groups"
223
+ ]
224
+ },
225
+ {
226
+ "cell_type": "code",
227
+ "execution_count": 43,
228
+ "id": "ad0ce4a1-27fa-4867-8061-4054dbb340df",
229
+ "metadata": {},
230
+ "outputs": [],
231
+ "source": [
232
+ "t_label=data[0]\n",
233
+ "p_label=data[1]"
234
+ ]
235
+ },
236
+ {
237
+ "cell_type": "code",
238
+ "execution_count": 47,
239
+ "id": "a4f4a2b9-3134-42ac-871b-4e117098cd0e",
240
+ "metadata": {},
241
+ "outputs": [],
242
+ "source": [
243
+ "# Step 1: Align graduation_group, t_label, and p_label\n",
244
+ "aligned_labels = list(zip(graduation_groups, t_label, p_label))\n",
245
+ "\n",
246
+ "# Step 2: Separate the labels for high and low groups\n",
247
+ "high_t_labels = [t for grad, t, p in aligned_labels if grad == 'high']\n",
248
+ "low_t_labels = [t for grad, t, p in aligned_labels if grad == 'low']\n",
249
+ "\n",
250
+ "high_p_labels = [p for grad, t, p in aligned_labels if grad == 'high']\n",
251
+ "low_p_labels = [p for grad, t, p in aligned_labels if grad == 'low']\n",
252
+ "\n"
253
+ ]
254
+ },
255
+ {
256
+ "cell_type": "code",
257
+ "execution_count": 50,
258
+ "id": "c8e34660-83d0-46a1-a218-95d609e11729",
259
+ "metadata": {},
260
+ "outputs": [
261
+ {
262
+ "data": {
263
+ "text/plain": [
264
+ "997"
265
+ ]
266
+ },
267
+ "execution_count": 50,
268
+ "metadata": {},
269
+ "output_type": "execute_result"
270
+ }
271
+ ],
272
+ "source": [
273
+ "len(low_t_labels)+len(high_t_labels)"
274
+ ]
275
+ },
276
+ {
277
+ "cell_type": "code",
278
+ "execution_count": 51,
279
+ "id": "c11050db-2636-4c50-9cd4-b9943e5cee83",
280
+ "metadata": {},
281
+ "outputs": [],
282
+ "source": [
283
+ "from sklearn.metrics import precision_score, recall_score, f1_score, confusion_matrix, roc_curve, roc_auc_score"
284
+ ]
285
+ },
286
+ {
287
+ "cell_type": "code",
288
+ "execution_count": 52,
289
+ "id": "e1309e93-7063-4f48-bbc7-11a0d449c34e",
290
+ "metadata": {},
291
+ "outputs": [
292
+ {
293
+ "name": "stdout",
294
+ "output_type": "stream",
295
+ "text": [
296
+ "ROC-AUC Score for High Graduation Rate Group: 0.675\n",
297
+ "ROC-AUC Score for Low Graduation Rate Group: 0.7489795918367347\n"
298
+ ]
299
+ }
300
+ ],
301
+ "source": [
302
+ "high_roc_auc = roc_auc_score(high_t_labels, high_p_labels) if len(set(high_t_labels)) > 1 else None\n",
303
+ "low_roc_auc = roc_auc_score(low_t_labels, low_p_labels) if len(set(low_t_labels)) > 1 else None\n",
304
+ "\n",
305
+ "print(\"ROC-AUC Score for High Graduation Rate Group:\", high_roc_auc)\n",
306
+ "print(\"ROC-AUC Score for Low Graduation Rate Group:\", low_roc_auc)"
307
+ ]
308
+ },
309
+ {
310
+ "cell_type": "code",
311
+ "execution_count": 4,
312
+ "id": "a99e7812-817d-4f9f-b6fa-1a58aa3a34dc",
313
+ "metadata": {},
314
+ "outputs": [
315
+ {
316
+ "ename": "TypeError",
317
+ "evalue": "cannot convert the series to <class 'int'>",
318
+ "output_type": "error",
319
+ "traceback": [
320
+ "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
321
+ "\u001b[1;31mTypeError\u001b[0m Traceback (most recent call last)",
322
+ "Cell \u001b[1;32mIn[4], line 47\u001b[0m\n\u001b[0;32m 44\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m \u001b[38;5;28mopen\u001b[39m(test_info_location, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mr\u001b[39m\u001b[38;5;124m\"\u001b[39m) \u001b[38;5;28;01mas\u001b[39;00m file:\n\u001b[0;32m 45\u001b[0m data \u001b[38;5;241m=\u001b[39m file\u001b[38;5;241m.\u001b[39mreadlines()\n\u001b[1;32m---> 47\u001b[0m ideal_opt_task \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mint\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mtest_info\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;241;43m7\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m)\u001b[49m \u001b[38;5;66;03m# Assuming test_info[7] is accessible and holds the ideal task (1 or 2)\u001b[39;00m\n\u001b[0;32m 49\u001b[0m \u001b[38;5;66;03m# Initialize counters\u001b[39;00m\n\u001b[0;32m 50\u001b[0m task_counts \u001b[38;5;241m=\u001b[39m {\n\u001b[0;32m 51\u001b[0m \u001b[38;5;241m1\u001b[39m: {\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124monly_opt1\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;241m0\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124monly_opt2\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;241m0\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mboth\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;241m0\u001b[39m},\n\u001b[0;32m 52\u001b[0m \u001b[38;5;241m2\u001b[39m: {\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124monly_opt1\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;241m0\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124monly_opt2\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;241m0\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mboth\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;241m0\u001b[39m}\n\u001b[0;32m 53\u001b[0m }\n",
323
+ "File \u001b[1;32m~\\AppData\\Local\\Programs\\Python\\Python312\\Lib\\site-packages\\pandas\\core\\series.py:230\u001b[0m, in \u001b[0;36m_coerce_method.<locals>.wrapper\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 222\u001b[0m warnings\u001b[38;5;241m.\u001b[39mwarn(\n\u001b[0;32m 223\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mCalling \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mconverter\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m on a single element Series is \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m 224\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mdeprecated and will raise a TypeError in the future. \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 227\u001b[0m stacklevel\u001b[38;5;241m=\u001b[39mfind_stack_level(),\n\u001b[0;32m 228\u001b[0m )\n\u001b[0;32m 229\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m converter(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39miloc[\u001b[38;5;241m0\u001b[39m])\n\u001b[1;32m--> 230\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mTypeError\u001b[39;00m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcannot convert the series to \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mconverter\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n",
324
+ "\u001b[1;31mTypeError\u001b[0m: cannot convert the series to <class 'int'>"
325
+ ]
326
+ }
327
+ ],
328
+ "source": [
329
+ "parent_location=\"ratio_proportion_change3_2223/sch_largest_100-coded/finetuning/\"\n",
330
+ "test_info_location=parent_location+\"fullTest/test_info.txt\"\n",
331
+ "test_location=parent_location+\"fullTest/test.txt\"\n",
332
+ "test_info = pd.read_csv(test_info_location, sep=',', header=None, engine='python')\n",
333
+ "\n",
334
+ "def analyze_row(row, ideal_opt_task):\n",
335
+ " # Split the row into fields\n",
336
+ " fields = row.split(\"\\t\")\n",
337
+ "\n",
338
+ " # Define tasks for OptionalTask_1, OptionalTask_2, and FinalAnswer\n",
339
+ " optional_task_1_subtasks = [\"DenominatorFactor\", \"NumeratorFactor\", \"EquationAnswer\"]\n",
340
+ " optional_task_2_subtasks = [\n",
341
+ " \"FirstRow2:1\", \"FirstRow2:2\", \"FirstRow1:1\", \"FirstRow1:2\", \n",
342
+ " \"SecondRow\", \"ThirdRow\"\n",
343
+ " ]\n",
344
+ " final_answer_tasks = [\"FinalAnswer\"]\n",
345
+ "\n",
346
+ " # Helper function to evaluate task attempts\n",
347
+ " def evaluate_tasks(fields, tasks):\n",
348
+ " task_status = {}\n",
349
+ " for task in tasks:\n",
350
+ " relevant_attempts = [f for f in fields if task in f]\n",
351
+ " if any(\"OK\" in attempt for attempt in relevant_attempts):\n",
352
+ " task_status[task] = \"Attempted (Successful)\"\n",
353
+ " elif any(\"ERROR\" in attempt for attempt in relevant_attempts):\n",
354
+ " task_status[task] = \"Attempted (Error)\"\n",
355
+ " elif any(\"JIT\" in attempt for attempt in relevant_attempts):\n",
356
+ " task_status[task] = \"Attempted (JIT)\"\n",
357
+ " else:\n",
358
+ " task_status[task] = \"Unattempted\"\n",
359
+ " return task_status\n",
360
+ "\n",
361
+ " # Evaluate tasks for each category\n",
362
+ " optional_task_1_status = evaluate_tasks(fields, optional_task_1_subtasks)\n",
363
+ " optional_task_2_status = evaluate_tasks(fields, optional_task_2_subtasks)\n",
364
+ "\n",
365
+ " # Check if tasks have any successful attempt\n",
366
+ " opt1_done = any(status == \"Attempted (Successful)\" for status in optional_task_1_status.values())\n",
367
+ " opt2_done = any(status == \"Attempted (Successful)\" for status in optional_task_2_status.values())\n",
368
+ "\n",
369
+ " return opt1_done, opt2_done\n",
370
+ "\n",
371
+ "# Read data from test_info.txt\n",
372
+ "with open(test_info_location, \"r\") as file:\n",
373
+ " data = file.readlines()\n",
374
+ "\n",
375
+ "ideal_opt_task = int(test_info[6]) # Assuming test_info[7] is accessible and holds the ideal task (1 or 2)\n",
376
+ "\n",
377
+ "# Initialize counters\n",
378
+ "task_counts = {\n",
379
+ " 1: {\"only_opt1\": 0, \"only_opt2\": 0, \"both\": 0},\n",
380
+ " 2: {\"only_opt1\": 0, \"only_opt2\": 0, \"both\": 0}\n",
381
+ "}\n",
382
+ "\n",
383
+ "for row in data:\n",
384
+ " row = row.strip()\n",
385
+ " if not row:\n",
386
+ " continue\n",
387
+ " opt1_done, opt2_done = analyze_row(row, ideal_opt_task)\n",
388
+ "\n",
389
+ " if ideal_opt_task == 0:\n",
390
+ " if opt1_done and not opt2_done:\n",
391
+ " task_counts[1][\"only_opt1\"] += 1\n",
392
+ " elif not opt1_done and opt2_done:\n",
393
+ " task_counts[1][\"only_opt2\"] += 1\n",
394
+ " elif opt1_done and opt2_done:\n",
395
+ " task_counts[1][\"both\"] += 1\n",
396
+ " elif ideal_opt_task == 1:\n",
397
+ " if opt1_done and not opt2_done:\n",
398
+ " task_counts[2][\"only_opt1\"] += 1\n",
399
+ " elif not opt1_done and opt2_done:\n",
400
+ " task_counts[2][\"only_opt2\"] += 1\n",
401
+ " elif opt1_done and opt2_done:\n",
402
+ " task_counts[2][\"both\"] += 1\n",
403
+ "\n",
404
+ "# Create a string output for results\n",
405
+ "output_summary = \"Task Analysis Summary:\\n\"\n",
406
+ "output_summary += \"-----------------------\\n\"\n",
407
+ "\n",
408
+ "for ideal_task, counts in task_counts.items():\n",
409
+ " output_summary += f\"Ideal Task = OptionalTask_{ideal_task}:\\n\"\n",
410
+ " output_summary += f\" Only OptionalTask_1 done: {counts['only_opt1']}\\n\"\n",
411
+ " output_summary += f\" Only OptionalTask_2 done: {counts['only_opt2']}\\n\"\n",
412
+ " output_summary += f\" Both done: {counts['both']}\\n\"\n",
413
+ "\n",
414
+ "print(output_summary)"
415
+ ]
416
+ },
417
+ {
418
+ "cell_type": "code",
419
+ "execution_count": null,
420
+ "id": "65ad9383-741f-44eb-8e8f-853ee7bc52a2",
421
+ "metadata": {},
422
+ "outputs": [],
423
+ "source": []
424
+ }
425
+ ],
426
+ "metadata": {
427
+ "kernelspec": {
428
+ "display_name": "Python 3 (ipykernel)",
429
+ "language": "python",
430
+ "name": "python3"
431
+ },
432
+ "language_info": {
433
+ "codemirror_mode": {
434
+ "name": "ipython",
435
+ "version": 3
436
+ },
437
+ "file_extension": ".py",
438
+ "mimetype": "text/x-python",
439
+ "name": "python",
440
+ "nbconvert_exporter": "python",
441
+ "pygments_lexer": "ipython3",
442
+ "version": "3.12.4"
443
+ }
444
+ },
445
+ "nbformat": 4,
446
+ "nbformat_minor": 5
447
+ }
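The last cell of this checkpoint fails with TypeError: cannot convert the series to <class 'int'> because int(test_info[6]) is applied to a whole pandas column; the committed distinguish_high_low_label.ipynb further down switches to a per-row lookup. A minimal sketch of the difference, using a hypothetical three-row frame:

import pandas as pd

# Hypothetical stand-in: column 6 holds the ideal-task flag for each instance
test_info = pd.DataFrame({6: [0, 1, 0]})

# Broken (checkpoint): int() on a Series raises TypeError
# ideal_opt_task = int(test_info[6])

# Fixed (committed notebook): keep the column and index it row by row
ideal_tasks = test_info[6]
for i in range(len(ideal_tasks)):
    ideal_task = int(ideal_tasks[i])  # scalar flag for row i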
Untitled.ipynb CHANGED
@@ -623,7 +623,7 @@
623
  "uri": "us-docker.pkg.dev/deeplearning-platform-release/gcr.io/base-cu113:m122"
624
  },
625
  "kernelspec": {
626
- "display_name": "Python 3",
627
  "language": "python",
628
  "name": "python3"
629
  },
@@ -637,7 +637,7 @@
637
  "name": "python",
638
  "nbconvert_exporter": "python",
639
  "pygments_lexer": "ipython3",
640
- "version": "3.10.14"
641
  }
642
  },
643
  "nbformat": 4,
 
623
  "uri": "us-docker.pkg.dev/deeplearning-platform-release/gcr.io/base-cu113:m122"
624
  },
625
  "kernelspec": {
626
+ "display_name": "Python 3 (ipykernel)",
627
  "language": "python",
628
  "name": "python3"
629
  },
 
637
  "name": "python",
638
  "nbconvert_exporter": "python",
639
  "pygments_lexer": "ipython3",
640
+ "version": "3.12.4"
641
  }
642
  },
643
  "nbformat": 4,
app.py CHANGED
@@ -8,6 +8,7 @@ import shutil
8
  import matplotlib.pyplot as plt
9
  from sklearn.metrics import roc_curve, auc
10
  import pandas as pd
 
11
  # Define the function to process the input file and model selection
12
 
13
  def process_file(model_name,inc_slider,progress=Progress(track_tqdm=True)):
@@ -66,6 +67,8 @@ def process_file(model_name,inc_slider,progress=Progress(track_tqdm=True)):
66
 
67
  # Step 7: Get indices for the sampled schools
68
  indices = test_info[test_info[0].isin(random_schools)].index.tolist()
 
 
69
 
70
  # Load the test file and select rows based on indices
71
  test = pd.read_csv(test_location, sep=',', header=None, engine='python')
@@ -74,7 +77,27 @@ def process_file(model_name,inc_slider,progress=Progress(track_tqdm=True)):
74
  # Save the selected rows to a file
75
  selected_rows_df2.to_csv('selected_rows.txt', sep='\t', index=False, header=False, quoting=3, escapechar=' ')
76
 
77
-
78
  # For demonstration purposes, we'll just return the content with the selected model name
79
 
80
  # print(checkpoint)
@@ -87,7 +110,7 @@ def process_file(model_name,inc_slider,progress=Progress(track_tqdm=True)):
87
  # model_name="highGRschool10"
88
  # Function to analyze each row
89
  def analyze_row(row):
90
- # Split the row into fields
91
  fields = row.split("\t")
92
 
93
  # Define tasks for OptionalTask_1, OptionalTask_2, and FinalAnswer
@@ -96,14 +119,12 @@ def process_file(model_name,inc_slider,progress=Progress(track_tqdm=True)):
96
  "FirstRow2:1", "FirstRow2:2", "FirstRow1:1", "FirstRow1:2",
97
  "SecondRow", "ThirdRow"
98
  ]
99
- final_answer_tasks = ["FinalAnswer"]
100
 
101
  # Helper function to evaluate task attempts
102
  def evaluate_tasks(fields, tasks):
103
  task_status = {}
104
  for task in tasks:
105
  relevant_attempts = [f for f in fields if task in f]
106
- # print(relevant_attempts)
107
  if any("OK" in attempt for attempt in relevant_attempts):
108
  task_status[task] = "Attempted (Successful)"
109
  elif any("ERROR" in attempt for attempt in relevant_attempts):
@@ -117,40 +138,62 @@ def process_file(model_name,inc_slider,progress=Progress(track_tqdm=True)):
117
  # Evaluate tasks for each category
118
  optional_task_1_status = evaluate_tasks(fields, optional_task_1_subtasks)
119
  optional_task_2_status = evaluate_tasks(fields, optional_task_2_subtasks)
120
- final_answer_status = evaluate_tasks(fields, final_answer_tasks)
121
-
122
- # Combine results
123
- result = {
124
- "OptionalTask_1": optional_task_1_status,
125
- "OptionalTask_2": optional_task_2_status,
126
- "FinalAnswer": final_answer_status,
127
- }
128
- return result
129
  # Read data from test_info.txt
130
  with open(test_info_location, "r") as file:
131
  data = file.readlines()
132
- results = [analyze_row(row.strip()) for row in data if row.strip()]
133
 
134
- status_counts = {}
 
135
 
 
 
 
 
 
136
 
137
- for result in results:
138
- for task_category, task_statuses in result.items():
139
- for task, status in task_statuses.items():
140
- if task not in status_counts:
141
- status_counts[task] = {"Attempted (Successful)": 0, "Attempted (Error)": 0,
142
- "Attempted (JIT)": 0, "Unattempted": 0}
143
- status_counts[task][status] += 1
144
 
145
  # Create a string output for results
146
  output_summary = "Task Analysis Summary:\n"
147
  output_summary += "-----------------------\n"
148
 
149
- for task, statuses in status_counts.items():
150
- output_summary += f"Task: {task}\n"
151
- for status, count in statuses.items():
152
- output_summary += f" {status}: {count}\n"
 
153
 
 
154
 
155
  progress(0.2, desc="analysis done!! Executing models")
156
  print("finetuned task: ",finetune_task)
@@ -175,10 +218,12 @@ def process_file(model_name,inc_slider,progress=Progress(track_tqdm=True)):
175
  result[key]=value
176
  else:
177
  result[key]=float(value)
 
 
178
  # Create a plot
179
  with open("roc_data.pkl", "rb") as f:
180
  fpr, tpr, _ = pickle.load(f)
181
-
182
  roc_auc = auc(fpr, tpr)
183
  fig, ax = plt.subplots()
184
  ax.plot(fpr, tpr, color='blue', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
@@ -191,6 +236,10 @@ def process_file(model_name,inc_slider,progress=Progress(track_tqdm=True)):
191
  plot_path = "plot.png"
192
  fig.savefig(plot_path)
193
  plt.close(fig)
 
 
 
 
194
  progress(1.0)
195
  # Prepare text output
196
  text_output = f"Model: {model_name}\nResult:\n{result}"
@@ -203,9 +252,12 @@ def process_file(model_name,inc_slider,progress=Progress(track_tqdm=True)):
203
  Total Schools in test: {len(unique_schools):.4f}\n
204
  Total number of instances having Schools with HGR : {len(high_sample):.4f}\n
205
  Total number of instances having Schools with LGR: {len(low_sample):.4f}\n
 
 
 
206
  -----------------\n
207
  """
208
- return text_output,plot_path
209
 
210
  # List of models for the dropdown menu
211
 
@@ -456,11 +508,11 @@ tbody.svelte-18wv37q>tr.svelte-18wv37q:nth-child(odd) {
456
  with gr.Row():
457
  output_text = gr.Textbox(label="")
458
  output_image = gr.Image(label="ROC")
459
- # output_summary = gr.Textbox(label="Summary")
460
 
461
  btn = gr.Button("Submit")
462
 
463
- btn.click(fn=process_file, inputs=[model_dropdown,increment_slider], outputs=[output_text,output_image])
464
 
465
 
466
  # Launch the app
 
8
  import matplotlib.pyplot as plt
9
  from sklearn.metrics import roc_curve, auc
10
  import pandas as pd
11
+ from sklearn.metrics import roc_auc_score
12
  # Define the function to process the input file and model selection
13
 
14
  def process_file(model_name,inc_slider,progress=Progress(track_tqdm=True)):
 
67
 
68
  # Step 7: Get indices for the sampled schools
69
  indices = test_info[test_info[0].isin(random_schools)].index.tolist()
70
+ high_indices = test_info[(test_info[0].isin(high_sample))].index.tolist()
71
+ low_indices = test_info[(test_info[0].isin(low_sample))].index.tolist()
72
 
73
  # Load the test file and select rows based on indices
74
  test = pd.read_csv(test_location, sep=',', header=None, engine='python')
 
77
  # Save the selected rows to a file
78
  selected_rows_df2.to_csv('selected_rows.txt', sep='\t', index=False, header=False, quoting=3, escapechar=' ')
79
 
80
+ graduation_groups = [
81
+ 'high' if idx in high_indices else 'low' for idx in selected_rows_df2.index
82
+ ]
83
+
84
+
85
+ with open("roc_data2.pkl", 'rb') as file:
86
+ data = pickle.load(file)
87
+ t_label=data[0]
88
+ p_label=data[1]
89
+ # Step 1: Align graduation_group, t_label, and p_label
90
+ aligned_labels = list(zip(graduation_groups, t_label, p_label))
91
+
92
+ # Step 2: Separate the labels for high and low groups
93
+ high_t_labels = [t for grad, t, p in aligned_labels if grad == 'high']
94
+ low_t_labels = [t for grad, t, p in aligned_labels if grad == 'low']
95
+
96
+ high_p_labels = [p for grad, t, p in aligned_labels if grad == 'high']
97
+ low_p_labels = [p for grad, t, p in aligned_labels if grad == 'low']
98
+
99
+ high_roc_auc = roc_auc_score(high_t_labels, high_p_labels) if len(set(high_t_labels)) > 1 else None
100
+ low_roc_auc = roc_auc_score(low_t_labels, low_p_labels) if len(set(low_t_labels)) > 1 else None
101
  # For demonstration purposes, we'll just return the content with the selected model name
102
 
103
  # print(checkpoint)
 
110
  # model_name="highGRschool10"
111
  # Function to analyze each row
112
  def analyze_row(row):
113
+ # Split the row into fields
114
  fields = row.split("\t")
115
 
116
  # Define tasks for OptionalTask_1, OptionalTask_2, and FinalAnswer
 
119
  "FirstRow2:1", "FirstRow2:2", "FirstRow1:1", "FirstRow1:2",
120
  "SecondRow", "ThirdRow"
121
  ]
 
122
 
123
  # Helper function to evaluate task attempts
124
  def evaluate_tasks(fields, tasks):
125
  task_status = {}
126
  for task in tasks:
127
  relevant_attempts = [f for f in fields if task in f]
 
128
  if any("OK" in attempt for attempt in relevant_attempts):
129
  task_status[task] = "Attempted (Successful)"
130
  elif any("ERROR" in attempt for attempt in relevant_attempts):
 
138
  # Evaluate tasks for each category
139
  optional_task_1_status = evaluate_tasks(fields, optional_task_1_subtasks)
140
  optional_task_2_status = evaluate_tasks(fields, optional_task_2_subtasks)
141
+
142
+ # Check if tasks have any successful attempt
143
+ opt1_done = any(status == "Attempted (Successful)" for status in optional_task_1_status.values())
144
+ opt2_done = any(status == "Attempted (Successful)" for status in optional_task_2_status.values())
145
+
146
+ return opt1_done, opt2_done
147
+
148
+ # Read data from test_info.txt
 
149
  # Read data from test_info.txt
150
  with open(test_info_location, "r") as file:
151
  data = file.readlines()
 
152
 
153
+ # test_info[6] holds the ideal task flag for each instance
154
+ ideal_tasks = test_info[6] # Each element is 0 or 1 (OptionalTask_1 vs OptionalTask_2)
155
 
156
+ # Initialize counters
157
+ task_counts = {
158
+ 1: {"only_opt1": 0, "only_opt2": 0, "both": 0},
159
+ 2: {"only_opt1": 0, "only_opt2": 0, "both": 0}
160
+ }
161
 
162
+ # Analyze rows
163
+ for i, row in enumerate(data):
164
+ row = row.strip()
165
+ if not row:
166
+ continue
167
+
168
+ ideal_task = ideal_tasks[i] # Get the ideal task for the current row
169
+ opt1_done, opt2_done = analyze_row(row)
170
+
171
+ if ideal_task == 0:
172
+ if opt1_done and not opt2_done:
173
+ task_counts[1]["only_opt1"] += 1
174
+ elif not opt1_done and opt2_done:
175
+ task_counts[1]["only_opt2"] += 1
176
+ elif opt1_done and opt2_done:
177
+ task_counts[1]["both"] += 1
178
+ elif ideal_task == 1:
179
+ if opt1_done and not opt2_done:
180
+ task_counts[2]["only_opt1"] += 1
181
+ elif not opt1_done and opt2_done:
182
+ task_counts[2]["only_opt2"] += 1
183
+ elif opt1_done and opt2_done:
184
+ task_counts[2]["both"] += 1
185
 
186
  # Create a string output for results
187
  output_summary = "Task Analysis Summary:\n"
188
  output_summary += "-----------------------\n"
189
 
190
+ for ideal_task, counts in task_counts.items():
191
+ output_summary += f"Ideal Task = OptionalTask_{ideal_task}:\n"
192
+ output_summary += f" Only OptionalTask_1 done: {counts['only_opt1']}\n"
193
+ output_summary += f" Only OptionalTask_2 done: {counts['only_opt2']}\n"
194
+ output_summary += f" Both done: {counts['both']}\n"
195
 
196
+ # print(output_summary)
197
 
198
  progress(0.2, desc="analysis done!! Executing models")
199
  print("finetuned task: ",finetune_task)
 
218
  result[key]=value
219
  else:
220
  result[key]=float(value)
221
+ result["ROC score of HGR"]=high_roc_auc
222
+ result["ROC score of LGR"]=low_roc_auc
223
  # Create a plot
224
  with open("roc_data.pkl", "rb") as f:
225
  fpr, tpr, _ = pickle.load(f)
226
+ # print(fpr,tpr)
227
  roc_auc = auc(fpr, tpr)
228
  fig, ax = plt.subplots()
229
  ax.plot(fpr, tpr, color='blue', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
 
236
  plot_path = "plot.png"
237
  fig.savefig(plot_path)
238
  plt.close(fig)
239
+
240
+
241
+
242
+
243
  progress(1.0)
244
  # Prepare text output
245
  text_output = f"Model: {model_name}\nResult:\n{result}"
 
252
  Total Schools in test: {len(unique_schools):.4f}\n
253
  Total number of instances having Schools with HGR : {len(high_sample):.4f}\n
254
  Total number of instances having Schools with LGR: {len(low_sample):.4f}\n
255
+
256
+ ROC score of HGR: {high_roc_auc}\n
257
+ ROC score of LGR: {low_roc_auc}\n
258
  -----------------\n
259
  """
260
+ return text_output,plot_path,output_summary
261
 
262
  # List of models for the dropdown menu
263
 
 
508
  with gr.Row():
509
  output_text = gr.Textbox(label="")
510
  output_image = gr.Image(label="ROC")
511
+ output_summary = gr.Textbox(label="Summary")
512
 
513
  btn = gr.Button("Submit")
514
 
515
+ btn.click(fn=process_file, inputs=[model_dropdown,increment_slider], outputs=[output_text,output_image,output_summary])
516
 
517
 
518
  # Launch the app
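With this change process_file returns three values (result text, ROC plot path, task summary) and the click handler maps them onto three output widgets. A condensed, hypothetical sketch of the Gradio wiring; the full layout, CSS, and model handling live in app.py:

import gradio as gr

def process_file(model_name, inc_slider):
    # Model evaluation elided; only the return shape matters here
    text_output = f"Model: {model_name}, sample %: {inc_slider}"
    plot_path = "plot.png"  # assumed to have been saved by the ROC plotting step
    output_summary = "Task Analysis Summary:\n-----------------------\n"
    return text_output, plot_path, output_summary

with gr.Blocks() as demo:
    model_dropdown = gr.Dropdown(["highGRschool10"], label="Model")       # hypothetical choices
    increment_slider = gr.Slider(1, 100, value=1, label="Percent of schools")  # assumed range
    output_text = gr.Textbox(label="")
    output_image = gr.Image(label="ROC")
    output_summary = gr.Textbox(label="Summary")
    btn = gr.Button("Submit")
    btn.click(fn=process_file,
              inputs=[model_dropdown, increment_slider],
              outputs=[output_text, output_image, output_summary])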
distinguish_high_low_label.ipynb ADDED
@@ -0,0 +1,451 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 3,
6
+ "id": "960bac80-51c7-4e9f-ad2d-84cd6c710f98",
7
+ "metadata": {},
8
+ "outputs": [],
9
+ "source": [
10
+ "import pickle\n",
11
+ "import pandas as pd"
12
+ ]
13
+ },
14
+ {
15
+ "cell_type": "code",
16
+ "execution_count": 4,
17
+ "id": "a34f21d0-0854-4a54-8f93-67718b2f969e",
18
+ "metadata": {},
19
+ "outputs": [],
20
+ "source": [
21
+ "file_path = \"roc_data2.pkl\"\n",
22
+ "\n",
23
+ "# Open and load the pickle file\n",
24
+ "with open(file_path, 'rb') as file:\n",
25
+ " data = pickle.load(file)\n",
26
+ "\n",
27
+ "\n",
28
+ "# Print or use the data\n",
29
+ "# data[2]"
30
+ ]
31
+ },
32
+ {
33
+ "cell_type": "code",
34
+ "execution_count": 5,
35
+ "id": "f9febed4-ce50-4e30-96ea-4b538ce2f9a1",
36
+ "metadata": {},
37
+ "outputs": [],
38
+ "source": [
39
+ "inc_slider=1\n",
40
+ "parent_location=\"ratio_proportion_change3_2223/sch_largest_100-coded/finetuning/\"\n",
41
+ "test_info_location=parent_location+\"fullTest/test_info.txt\"\n",
42
+ "test_location=parent_location+\"fullTest/test.txt\"\n",
43
+ "test_info = pd.read_csv(test_info_location, sep=',', header=None, engine='python')\n",
44
+ "grad_rate_data = pd.DataFrame(pd.read_pickle('school_grduation_rate.pkl'),columns=['school_number','grad_rate']) # Load the grad_rate data\n",
45
+ "\n",
46
+ "# Step 1: Extract unique school numbers from test_info\n",
47
+ "unique_schools = test_info[0].unique()\n",
48
+ "\n",
49
+ "# Step 2: Filter the grad_rate_data using the unique school numbers\n",
50
+ "schools = grad_rate_data[grad_rate_data['school_number'].isin(unique_schools)]\n",
51
+ "\n",
52
+ "# Define a threshold for high and low graduation rates (adjust as needed)\n",
53
+ "grad_rate_threshold = 0.9 \n",
54
+ "\n",
55
+ "# Step 4: Divide schools into high and low graduation rate groups\n",
56
+ "high_grad_schools = schools[schools['grad_rate'] >= grad_rate_threshold]['school_number'].unique()\n",
57
+ "low_grad_schools = schools[schools['grad_rate'] < grad_rate_threshold]['school_number'].unique()\n",
58
+ "\n",
59
+ "# Step 5: Sample percentage of schools from each group\n",
60
+ "high_sample = pd.Series(high_grad_schools).sample(frac=inc_slider/100, random_state=1).tolist()\n",
61
+ "low_sample = pd.Series(low_grad_schools).sample(frac=inc_slider/100, random_state=1).tolist()\n",
62
+ "\n",
63
+ "# Step 6: Combine the sampled schools\n",
64
+ "random_schools = high_sample + low_sample\n",
65
+ "\n",
66
+ "# Step 7: Get indices for the sampled schools\n",
67
+ "indices = test_info[test_info[0].isin(random_schools)].index.tolist()\n",
68
+ "\n"
69
+ ]
70
+ },
71
+ {
72
+ "cell_type": "code",
73
+ "execution_count": 6,
74
+ "id": "fdfdf4b6-2752-4a21-9880-869af69f20cf",
75
+ "metadata": {},
76
+ "outputs": [],
77
+ "source": [
78
+ "high_indices = test_info[(test_info[0].isin(high_sample))].index.tolist()\n",
79
+ "low_indices = test_info[(test_info[0].isin(low_sample))].index.tolist()"
80
+ ]
81
+ },
82
+ {
83
+ "cell_type": "code",
84
+ "execution_count": 7,
85
+ "id": "a79a4598-5702-4cc8-9f07-8e18fdda648b",
86
+ "metadata": {},
87
+ "outputs": [
88
+ {
89
+ "data": {
90
+ "text/plain": [
91
+ "997"
92
+ ]
93
+ },
94
+ "execution_count": 7,
95
+ "metadata": {},
96
+ "output_type": "execute_result"
97
+ }
98
+ ],
99
+ "source": [
100
+ "len(high_indices)+len(low_indices)\n"
101
+ ]
102
+ },
103
+ {
104
+ "cell_type": "code",
105
+ "execution_count": 8,
106
+ "id": "4707f3e6-2f44-46d8-ad8c-b6c244f693af",
107
+ "metadata": {},
108
+ "outputs": [
109
+ {
110
+ "data": {
111
+ "text/html": [
112
+ "<div>\n",
113
+ "<style scoped>\n",
114
+ " .dataframe tbody tr th:only-of-type {\n",
115
+ " vertical-align: middle;\n",
116
+ " }\n",
117
+ "\n",
118
+ " .dataframe tbody tr th {\n",
119
+ " vertical-align: top;\n",
120
+ " }\n",
121
+ "\n",
122
+ " .dataframe thead th {\n",
123
+ " text-align: right;\n",
124
+ " }\n",
125
+ "</style>\n",
126
+ "<table border=\"1\" class=\"dataframe\">\n",
127
+ " <thead>\n",
128
+ " <tr style=\"text-align: right;\">\n",
129
+ " <th></th>\n",
130
+ " <th>0</th>\n",
131
+ " </tr>\n",
132
+ " </thead>\n",
133
+ " <tbody>\n",
134
+ " <tr>\n",
135
+ " <th>5342</th>\n",
136
+ " <td>PercentChange-0\\tNumeratorQuantity1-0\\tNumerat...</td>\n",
137
+ " </tr>\n",
138
+ " <tr>\n",
139
+ " <th>5343</th>\n",
140
+ " <td>PercentChange-0\\tNumeratorQuantity2-0\\tNumerat...</td>\n",
141
+ " </tr>\n",
142
+ " <tr>\n",
143
+ " <th>5344</th>\n",
144
+ " <td>PercentChange-0\\tNumeratorQuantity2-0\\tNumerat...</td>\n",
145
+ " </tr>\n",
146
+ " <tr>\n",
147
+ " <th>5345</th>\n",
148
+ " <td>PercentChange-0\\tNumeratorQuantity2-2\\tNumerat...</td>\n",
149
+ " </tr>\n",
150
+ " <tr>\n",
151
+ " <th>5346</th>\n",
152
+ " <td>PercentChange-0\\tNumeratorQuantity2-0\\tDenomin...</td>\n",
153
+ " </tr>\n",
154
+ " <tr>\n",
155
+ " <th>...</th>\n",
156
+ " <td>...</td>\n",
157
+ " </tr>\n",
158
+ " <tr>\n",
159
+ " <th>113359</th>\n",
160
+ " <td>PercentChange-0\\tNumeratorQuantity2-2\\tNumerat...</td>\n",
161
+ " </tr>\n",
162
+ " <tr>\n",
163
+ " <th>113360</th>\n",
164
+ " <td>PercentChange-0\\tNumeratorQuantity2-0\\tNumerat...</td>\n",
165
+ " </tr>\n",
166
+ " <tr>\n",
167
+ " <th>113361</th>\n",
168
+ " <td>PercentChange-0\\tNumeratorQuantity2-0\\tNumerat...</td>\n",
169
+ " </tr>\n",
170
+ " <tr>\n",
171
+ " <th>113362</th>\n",
172
+ " <td>PercentChange-0\\tNumeratorQuantity2-0\\tNumerat...</td>\n",
173
+ " </tr>\n",
174
+ " <tr>\n",
175
+ " <th>113363</th>\n",
176
+ " <td>PercentChange-0\\tNumeratorQuantity2-0\\tNumerat...</td>\n",
177
+ " </tr>\n",
178
+ " </tbody>\n",
179
+ "</table>\n",
180
+ "<p>997 rows × 1 columns</p>\n",
181
+ "</div>"
182
+ ],
183
+ "text/plain": [
184
+ " 0\n",
185
+ "5342 PercentChange-0\\tNumeratorQuantity1-0\\tNumerat...\n",
186
+ "5343 PercentChange-0\\tNumeratorQuantity2-0\\tNumerat...\n",
187
+ "5344 PercentChange-0\\tNumeratorQuantity2-0\\tNumerat...\n",
188
+ "5345 PercentChange-0\\tNumeratorQuantity2-2\\tNumerat...\n",
189
+ "5346 PercentChange-0\\tNumeratorQuantity2-0\\tDenomin...\n",
190
+ "... ...\n",
191
+ "113359 PercentChange-0\\tNumeratorQuantity2-2\\tNumerat...\n",
192
+ "113360 PercentChange-0\\tNumeratorQuantity2-0\\tNumerat...\n",
193
+ "113361 PercentChange-0\\tNumeratorQuantity2-0\\tNumerat...\n",
194
+ "113362 PercentChange-0\\tNumeratorQuantity2-0\\tNumerat...\n",
195
+ "113363 PercentChange-0\\tNumeratorQuantity2-0\\tNumerat...\n",
196
+ "\n",
197
+ "[997 rows x 1 columns]"
198
+ ]
199
+ },
200
+ "execution_count": 8,
201
+ "metadata": {},
202
+ "output_type": "execute_result"
203
+ }
204
+ ],
205
+ "source": [
206
+ "# Load the test file and select rows based on indices\n",
207
+ "test = pd.read_csv(test_location, sep=',', header=None, engine='python')\n",
208
+ "selected_rows_df2 = test.loc[indices]\n",
209
+ "selected_rows_df2"
210
+ ]
211
+ },
212
+ {
213
+ "cell_type": "code",
214
+ "execution_count": 11,
215
+ "id": "1d0c3d49-061f-486b-9c19-cf20945f3207",
216
+ "metadata": {},
217
+ "outputs": [],
218
+ "source": [
219
+ "graduation_groups = [\n",
220
+ " 'high' if idx in high_indices else 'low' for idx in selected_rows_df2.index\n",
221
+ "]\n",
222
+ "# graduation_groups"
223
+ ]
224
+ },
225
+ {
226
+ "cell_type": "code",
227
+ "execution_count": 43,
228
+ "id": "ad0ce4a1-27fa-4867-8061-4054dbb340df",
229
+ "metadata": {},
230
+ "outputs": [],
231
+ "source": [
232
+ "t_label=data[0]\n",
233
+ "p_label=data[1]"
234
+ ]
235
+ },
236
+ {
237
+ "cell_type": "code",
238
+ "execution_count": 47,
239
+ "id": "a4f4a2b9-3134-42ac-871b-4e117098cd0e",
240
+ "metadata": {},
241
+ "outputs": [],
242
+ "source": [
243
+ "# Step 1: Align graduation_group, t_label, and p_label\n",
244
+ "aligned_labels = list(zip(graduation_groups, t_label, p_label))\n",
245
+ "\n",
246
+ "# Step 2: Separate the labels for high and low groups\n",
247
+ "high_t_labels = [t for grad, t, p in aligned_labels if grad == 'high']\n",
248
+ "low_t_labels = [t for grad, t, p in aligned_labels if grad == 'low']\n",
249
+ "\n",
250
+ "high_p_labels = [p for grad, t, p in aligned_labels if grad == 'high']\n",
251
+ "low_p_labels = [p for grad, t, p in aligned_labels if grad == 'low']\n",
252
+ "\n"
253
+ ]
254
+ },
255
+ {
256
+ "cell_type": "code",
257
+ "execution_count": 50,
258
+ "id": "c8e34660-83d0-46a1-a218-95d609e11729",
259
+ "metadata": {},
260
+ "outputs": [
261
+ {
262
+ "data": {
263
+ "text/plain": [
264
+ "997"
265
+ ]
266
+ },
267
+ "execution_count": 50,
268
+ "metadata": {},
269
+ "output_type": "execute_result"
270
+ }
271
+ ],
272
+ "source": [
273
+ "len(low_t_labels)+len(high_t_labels)"
274
+ ]
275
+ },
276
+ {
277
+ "cell_type": "code",
278
+ "execution_count": 51,
279
+ "id": "c11050db-2636-4c50-9cd4-b9943e5cee83",
280
+ "metadata": {},
281
+ "outputs": [],
282
+ "source": [
283
+ "from sklearn.metrics import precision_score, recall_score, f1_score, confusion_matrix, roc_curve, roc_auc_score"
284
+ ]
285
+ },
286
+ {
287
+ "cell_type": "code",
288
+ "execution_count": 52,
289
+ "id": "e1309e93-7063-4f48-bbc7-11a0d449c34e",
290
+ "metadata": {},
291
+ "outputs": [
292
+ {
293
+ "name": "stdout",
294
+ "output_type": "stream",
295
+ "text": [
296
+ "ROC-AUC Score for High Graduation Rate Group: 0.675\n",
297
+ "ROC-AUC Score for Low Graduation Rate Group: 0.7489795918367347\n"
298
+ ]
299
+ }
300
+ ],
301
+ "source": [
302
+ "high_roc_auc = roc_auc_score(high_t_labels, high_p_labels) if len(set(high_t_labels)) > 1 else None\n",
303
+ "low_roc_auc = roc_auc_score(low_t_labels, low_p_labels) if len(set(low_t_labels)) > 1 else None\n",
304
+ "\n",
305
+ "print(\"ROC-AUC Score for High Graduation Rate Group:\", high_roc_auc)\n",
306
+ "print(\"ROC-AUC Score for Low Graduation Rate Group:\", low_roc_auc)"
307
+ ]
308
+ },
309
+ {
310
+ "cell_type": "code",
311
+ "execution_count": 9,
312
+ "id": "a99e7812-817d-4f9f-b6fa-1a58aa3a34dc",
313
+ "metadata": {},
314
+ "outputs": [
315
+ {
316
+ "name": "stdout",
317
+ "output_type": "stream",
318
+ "text": [
319
+ "Task Analysis Summary:\n",
320
+ "-----------------------\n",
321
+ "Ideal Task = OptionalTask_1:\n",
322
+ " Only OptionalTask_1 done: 22501\n",
323
+ " Only OptionalTask_2 done: 20014\n",
324
+ " Both done: 24854\n",
325
+ "Ideal Task = OptionalTask_2:\n",
326
+ " Only OptionalTask_1 done: 12588\n",
327
+ " Only OptionalTask_2 done: 18942\n",
328
+ " Both done: 15147\n",
329
+ "\n"
330
+ ]
331
+ }
332
+ ],
333
+ "source": [
334
+ "def analyze_row(row):\n",
335
+ " # Split the row into fields\n",
336
+ " fields = row.split(\"\\t\")\n",
337
+ "\n",
338
+ " # Define tasks for OptionalTask_1, OptionalTask_2, and FinalAnswer\n",
339
+ " optional_task_1_subtasks = [\"DenominatorFactor\", \"NumeratorFactor\", \"EquationAnswer\"]\n",
340
+ " optional_task_2_subtasks = [\n",
341
+ " \"FirstRow2:1\", \"FirstRow2:2\", \"FirstRow1:1\", \"FirstRow1:2\", \n",
342
+ " \"SecondRow\", \"ThirdRow\"\n",
343
+ " ]\n",
344
+ "\n",
345
+ " # Helper function to evaluate task attempts\n",
346
+ " def evaluate_tasks(fields, tasks):\n",
347
+ " task_status = {}\n",
348
+ " for task in tasks:\n",
349
+ " relevant_attempts = [f for f in fields if task in f]\n",
350
+ " if any(\"OK\" in attempt for attempt in relevant_attempts):\n",
351
+ " task_status[task] = \"Attempted (Successful)\"\n",
352
+ " elif any(\"ERROR\" in attempt for attempt in relevant_attempts):\n",
353
+ " task_status[task] = \"Attempted (Error)\"\n",
354
+ " elif any(\"JIT\" in attempt for attempt in relevant_attempts):\n",
355
+ " task_status[task] = \"Attempted (JIT)\"\n",
356
+ " else:\n",
357
+ " task_status[task] = \"Unattempted\"\n",
358
+ " return task_status\n",
359
+ "\n",
360
+ " # Evaluate tasks for each category\n",
361
+ " optional_task_1_status = evaluate_tasks(fields, optional_task_1_subtasks)\n",
362
+ " optional_task_2_status = evaluate_tasks(fields, optional_task_2_subtasks)\n",
363
+ "\n",
364
+ " # Check if tasks have any successful attempt\n",
365
+ " opt1_done = any(status == \"Attempted (Successful)\" for status in optional_task_1_status.values())\n",
366
+ " opt2_done = any(status == \"Attempted (Successful)\" for status in optional_task_2_status.values())\n",
367
+ "\n",
368
+ " return opt1_done, opt2_done\n",
369
+ "\n",
370
+ "# Read data from test_info.txt\n",
371
+ "# Read data from test_info.txt\n",
372
+ "with open(test_info_location, \"r\") as file:\n",
373
+ " data = file.readlines()\n",
374
+ "\n",
375
+ "# Assuming test_info[7] is a list with ideal tasks for each instance\n",
376
+ "ideal_tasks = test_info[6] # A list where each element is either 1 or 2\n",
377
+ "\n",
378
+ "# Initialize counters\n",
379
+ "task_counts = {\n",
380
+ " 1: {\"only_opt1\": 0, \"only_opt2\": 0, \"both\": 0},\n",
381
+ " 2: {\"only_opt1\": 0, \"only_opt2\": 0, \"both\": 0}\n",
382
+ "}\n",
383
+ "\n",
384
+ "# Analyze rows\n",
385
+ "for i, row in enumerate(data):\n",
386
+ " row = row.strip()\n",
387
+ " if not row:\n",
388
+ " continue\n",
389
+ "\n",
390
+ " ideal_task = ideal_tasks[i] # Get the ideal task for the current row\n",
391
+ " opt1_done, opt2_done = analyze_row(row)\n",
392
+ "\n",
393
+ " if ideal_task == 0:\n",
394
+ " if opt1_done and not opt2_done:\n",
395
+ " task_counts[1][\"only_opt1\"] += 1\n",
396
+ " elif not opt1_done and opt2_done:\n",
397
+ " task_counts[1][\"only_opt2\"] += 1\n",
398
+ " elif opt1_done and opt2_done:\n",
399
+ " task_counts[1][\"both\"] += 1\n",
400
+ " elif ideal_task == 1:\n",
401
+ " if opt1_done and not opt2_done:\n",
402
+ " task_counts[2][\"only_opt1\"] += 1\n",
403
+ " elif not opt1_done and opt2_done:\n",
404
+ " task_counts[2][\"only_opt2\"] += 1\n",
405
+ " elif opt1_done and opt2_done:\n",
406
+ " task_counts[2][\"both\"] += 1\n",
407
+ "\n",
408
+ "# Create a string output for results\n",
409
+ "output_summary = \"Task Analysis Summary:\\n\"\n",
410
+ "output_summary += \"-----------------------\\n\"\n",
411
+ "\n",
412
+ "for ideal_task, counts in task_counts.items():\n",
413
+ " output_summary += f\"Ideal Task = OptionalTask_{ideal_task}:\\n\"\n",
414
+ " output_summary += f\" Only OptionalTask_1 done: {counts['only_opt1']}\\n\"\n",
415
+ " output_summary += f\" Only OptionalTask_2 done: {counts['only_opt2']}\\n\"\n",
416
+ " output_summary += f\" Both done: {counts['both']}\\n\"\n",
417
+ "\n",
418
+ "print(output_summary)\n"
419
+ ]
420
+ },
421
+ {
422
+ "cell_type": "code",
423
+ "execution_count": null,
424
+ "id": "65ad9383-741f-44eb-8e8f-853ee7bc52a2",
425
+ "metadata": {},
426
+ "outputs": [],
427
+ "source": []
428
+ }
429
+ ],
430
+ "metadata": {
431
+ "kernelspec": {
432
+ "display_name": "Python 3 (ipykernel)",
433
+ "language": "python",
434
+ "name": "python3"
435
+ },
436
+ "language_info": {
437
+ "codemirror_mode": {
438
+ "name": "ipython",
439
+ "version": 3
440
+ },
441
+ "file_extension": ".py",
442
+ "mimetype": "text/x-python",
443
+ "name": "python",
444
+ "nbconvert_exporter": "python",
445
+ "pygments_lexer": "ipython3",
446
+ "version": "3.12.4"
447
+ }
448
+ },
449
+ "nbformat": 4,
450
+ "nbformat_minor": 5
451
+ }
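analyze_row above treats each row as tab-separated attempt records and counts an optional task as done when any record for one of its subtasks contains "OK". A toy check of that rule with a made-up row string (real rows follow the logged action format shown in the table output earlier):

def evaluate_done(fields, subtasks):
    # Done if any field mentions one of the subtasks with an OK attempt
    return any("OK" in f for f in fields for task in subtasks if task in f)

opt1_subtasks = ["DenominatorFactor", "NumeratorFactor", "EquationAnswer"]
opt2_subtasks = ["FirstRow2:1", "FirstRow2:2", "FirstRow1:1", "FirstRow1:2",
                 "SecondRow", "ThirdRow"]

row = "NumeratorFactor-OK\tFirstRow1:1-ERROR\tSecondRow-JIT"  # hypothetical row
fields = row.split("\t")
print(evaluate_done(fields, opt1_subtasks))  # True  (NumeratorFactor succeeded)
print(evaluate_done(fields, opt2_subtasks))  # False (no OK among opt2 subtasks)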
new_test_saved_finetuned_model.py CHANGED
@@ -221,9 +221,12 @@ class BERTFineTuneTrainer:
221
  for key, value in final_msg.items():
222
  file.write(f"{key}: {value}\n")
223
  print(final_msg)
 
224
  fpr, tpr, thresholds = roc_curve(tlabels, positive_class_probs)
225
  with open("roc_data.pkl", "wb") as f:
226
  pickle.dump((fpr, tpr, thresholds), f)
 
 
227
  print(final_msg)
228
  f.close()
229
  with open(self.log_folder_path+f"/log_{phase}_finetuned_info.txt", 'a') as f1:
@@ -426,6 +429,7 @@ class BERTFineTuneCalibratedTrainer:
426
  auc_score = roc_auc_score(tlabels, positive_class_probs)
427
  end_time = time.time()
428
  final_msg = {
 
429
  "avg_loss": avg_loss / len(data_iter),
430
  "total_acc": total_correct * 100.0 / total_element,
431
  "precisions": precisions,
@@ -441,8 +445,7 @@ class BERTFineTuneCalibratedTrainer:
441
  for key, value in final_msg.items():
442
  file.write(f"{key}: {value}\n")
443
  with open("plabels.txt","w") as file:
444
- file.write(plabels)
445
-
446
  print(final_msg)
447
  fpr, tpr, thresholds = roc_curve(tlabels, positive_class_probs)
448
  f.close()
 
221
  for key, value in final_msg.items():
222
  file.write(f"{key}: {value}\n")
223
  print(final_msg)
224
+ # print(type(plabels),type(tlabels),plabels,tlabels)
225
  fpr, tpr, thresholds = roc_curve(tlabels, positive_class_probs)
226
  with open("roc_data.pkl", "wb") as f:
227
  pickle.dump((fpr, tpr, thresholds), f)
228
+ with open("roc_data2.pkl", "wb") as f:
229
+ pickle.dump((tlabels,positive_class_probs), f)
230
  print(final_msg)
231
  f.close()
232
  with open(self.log_folder_path+f"/log_{phase}_finetuned_info.txt", 'a') as f1:
 
429
  auc_score = roc_auc_score(tlabels, positive_class_probs)
430
  end_time = time.time()
431
  final_msg = {
432
+ "this one":"this one",
433
  "avg_loss": avg_loss / len(data_iter),
434
  "total_acc": total_correct * 100.0 / total_element,
435
  "precisions": precisions,
 
445
  for key, value in final_msg.items():
446
  file.write(f"{key}: {value}\n")
447
  with open("plabels.txt","w") as file:
448
+ file.write(plabels)
 
449
  print(final_msg)
450
  fpr, tpr, thresholds = roc_curve(tlabels, positive_class_probs)
451
  f.close()
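The extra dump added above is what app.py and the notebook read back as roc_data2.pkl: a (true labels, positive-class probabilities) tuple. A minimal round-trip sketch with hypothetical values:

import pickle

# Writer side (trainer): persist evaluation labels and probabilities
tlabels = [0, 1, 1, 0]                       # hypothetical
positive_class_probs = [0.2, 0.8, 0.6, 0.4]  # hypothetical
with open("roc_data2.pkl", "wb") as f:
    pickle.dump((tlabels, positive_class_probs), f)

# Reader side (app.py / notebook): unpack in the same order
with open("roc_data2.pkl", "rb") as f:
    data = pickle.load(f)
t_label, p_label = data[0], data[1]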
result.txt CHANGED
@@ -3,5 +3,5 @@ total_acc: 69.00702106318957
3
  precisions: 0.7236623191454734
4
  recalls: 0.6900702106318957
5
  f1_scores: 0.6802420656474512
6
- time_taken_from_start: 30.22950553894043
7
  auc_score: 0.7457100293916334
 
3
  precisions: 0.7236623191454734
4
  recalls: 0.6900702106318957
5
  f1_scores: 0.6802420656474512
6
+ time_taken_from_start: 21.604072332382202
7
  auc_score: 0.7457100293916334
roc_data2.pkl ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:41fa9d96833c12979f8495141ee61c0ba07d4a20c5fb5bc18a7f72bf4d15e8fd
3
+ size 28023