update metric
apps_metric.py CHANGED (+3 -3)
@@ -44,7 +44,7 @@ Returns:
     metrics: dict of three metrics: average accuracy, stric accuracy, and pass@k.
 Examples:
     >>> my_new_module = evaluate.load("loubnabnl/apps_metric")
-    >>> results = my_new_module.compute(references=["s=
+    >>> results = my_new_module.compute(references=[["s=input()\nprint(s)"]])
     >>> print(results)
     {'avg_accuracy': 0, 'strict_accuracy': 0, 'pass_at_k': None}
 """
@@ -75,7 +75,7 @@ class apps_metric(evaluate.EvaluationModule):



-    def _compute(self,
+    def _compute(self, predictions, k_list=[1, 10, 100], count_errors=True, level="all"):
         """Returns the scores"""
-        metrics = compute_metrics(
+        metrics = compute_metrics(predictions, k_list=k_list, count_errors=count_errors, level=level)
         return metrics
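For context, a minimal usage sketch of the updated interface. It assumes the module is still loaded through the evaluate library under the name shown in the docstring, and that extra keyword arguments passed to compute() are forwarded to the new _compute signature (k_list, count_errors, level), as evaluate does for non-input kwargs; the sample generation and the k_list value are illustrative only.

import evaluate

# Load the metric from the Hub (name taken from the docstring example above).
apps_metric = evaluate.load("loubnabnl/apps_metric")

# One inner list of candidate solutions per APPS problem; this single sample
# is the one from the docstring example.
generations = [["s=input()\nprint(s)"]]

# k_list, count_errors and level are forwarded to compute_metrics() through
# the updated _compute signature; k_list=[1] since only one sample per
# problem is provided here.
results = apps_metric.compute(
    references=generations,
    k_list=[1],
    count_errors=True,
    level="all",
)
print(results)  # e.g. {'avg_accuracy': 0, 'strict_accuracy': 0, 'pass_at_k': None}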