Spaces:
Running
Running
shounakpaul95
committed on
Commit
•
82fce5e
1
Parent(s):
1eb4ffa
Update eval_utils.py
Browse files- eval_utils.py +3 -2
eval_utils.py
CHANGED
@@ -238,7 +238,9 @@ def evaluate_pcr(gold_data, pred_data):
|
|
238 |
|
239 |
print(f"Micro-F1@{k} on IL-PCR test set:", f1)
|
240 |
|
241 |
-
|
|
|
|
|
242 |
|
243 |
|
244 |
def evaluate_summ(gold_data, pred_data):
|
@@ -478,7 +480,6 @@ def get_evaluation_scores(gold_data, submission_data):
|
|
478 |
if isinstance(result, str):
|
479 |
evaluation_results[task] = result
|
480 |
else:
|
481 |
-
print(task, result)
|
482 |
evaluation_results[task] = f"{result:.2f}"
|
483 |
|
484 |
blank_scores = {
|
|
|
238 |
|
239 |
print(f"Micro-F1@{k} on IL-PCR test set:", f1)
|
240 |
|
241 |
+
max_f1 = max(f1)
|
242 |
+
index_max = f1.index(max_f1) + 1
|
243 |
+
return f"{max_f1:.2f}@{index_max}"
|
244 |
|
245 |
|
246 |
def evaluate_summ(gold_data, pred_data):
|
|
|
480 |
if isinstance(result, str):
|
481 |
evaluation_results[task] = result
|
482 |
else:
|
|
|
483 |
evaluation_results[task] = f"{result:.2f}"
|
484 |
|
485 |
blank_scores = {
|