Fix metrics
Browse files- src/about.py +8 -11
src/about.py
CHANGED
@@ -17,17 +17,14 @@ class Task:
|
|
17 |
# ---------------------------------------------------
|
18 |
class Tasks(Enum):
|
19 |
# task_key, metric_key, title
|
20 |
-
task00 = Task("human_eval_solidity", "score", "HumanEval for Solidity")
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
task05 = Task("
|
26 |
-
task06 = Task("
|
27 |
-
task07 = Task("
|
28 |
-
task08 = Task("rougeLsum", "score", "ROUGE-Lsum")
|
29 |
-
task09 = Task("bleu", "score", "Bleu")
|
30 |
-
task10 = Task("brevity_penalty", "score", "Brevity Penalty")
|
31 |
# ---------------------------------------------------
|
32 |
|
33 |
# Your leaderboard name
|
|
|
17 |
# ---------------------------------------------------
|
18 |
class Tasks(Enum):
|
19 |
# task_key, metric_key, title
|
20 |
+
# task00 = Task("human_eval_solidity", "score", "HumanEval for Solidity")
|
21 |
+
task01 = Task("naive_judge", "score", "NaïveJudge")
|
22 |
+
task02 = Task("rouge1", "score", "ROUGE-unigrams")
|
23 |
+
task03 = Task("rouge2", "score", "ROUGE-bigrams")
|
24 |
+
task04 = Task("rougeL", "score", "ROUGE-Longest Common Subsequence")
|
25 |
+
task05 = Task("rougeLsum", "score", "ROUGE-Lsum")
|
26 |
+
task06 = Task("bleu", "score", "Bleu")
|
27 |
+
task07 = Task("brevity_penalty", "score", "Brevity Penalty")
|
|
|
|
|
|
|
28 |
# ---------------------------------------------------
|
29 |
|
30 |
# Your leaderboard name
|