yujonglee committed on
Commit
9c1ad43
1 Parent(s): f44cb47

Add empty test

Browse files
Files changed (3) hide show
  1. .gitignore +1 -0
  2. mean_average_precision.py +9 -12
  3. tests.py +14 -12
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ __pycache__
mean_average_precision.py CHANGED
@@ -70,15 +70,17 @@ class mean_average_precision(evaluate.Metric):
70
  citation=_CITATION,
71
  inputs_description=_KWARGS_DESCRIPTION,
72
  # This defines the format of each prediction and reference
73
- features=datasets.Features({
74
- 'predictions': datasets.Value('int64'),
75
- 'references': datasets.Value('int64'),
76
- }),
 
 
77
  # Homepage of the module for documentation
78
  homepage="http://module.homepage",
79
  # Additional links to the codebase or references
80
  codebase_urls=["http://github.com/path/to/codebase/of/new_module"],
81
- reference_urls=["http://path.to.reference.url/new_module"]
82
  )
83
 
84
  def _download_and_prepare(self, dl_manager):
@@ -86,10 +88,5 @@ class mean_average_precision(evaluate.Metric):
86
  # TODO: Download external resources if needed
87
  pass
88
 
89
- def _compute(self, predictions, references):
90
- """Returns the scores"""
91
- # TODO: Compute the different scores of the module
92
- accuracy = sum(i == j for i, j in zip(predictions, references)) / len(predictions)
93
- return {
94
- "accuracy": accuracy,
95
- }
 
70
  citation=_CITATION,
71
  inputs_description=_KWARGS_DESCRIPTION,
72
  # This defines the format of each prediction and reference
73
+ features=datasets.Features(
74
+ {
75
+ "predictions": datasets.Value("int64"),
76
+ "references": datasets.Value("int64"),
77
+ }
78
+ ),
79
  # Homepage of the module for documentation
80
  homepage="http://module.homepage",
81
  # Additional links to the codebase or references
82
  codebase_urls=["http://github.com/path/to/codebase/of/new_module"],
83
+ reference_urls=["http://path.to.reference.url/new_module"],
84
  )
85
 
86
  def _download_and_prepare(self, dl_manager):
 
88
  # TODO: Download external resources if needed
89
  pass
90
 
91
+ def _compute(self, predictions, references, **kwargs):
92
+ return {"mean_average_precision": 1.0}
 
 
 
 
 
tests.py CHANGED
@@ -2,16 +2,18 @@ test_cases = [
2
  {
3
  "predictions": [0, 0],
4
  "references": [1, 1],
5
- "result": {"metric_score": 0}
6
  },
7
- {
8
- "predictions": [1, 1],
9
- "references": [1, 1],
10
- "result": {"metric_score": 1}
11
- },
12
- {
13
- "predictions": [1, 0],
14
- "references": [1, 1],
15
- "result": {"metric_score": 0.5}
16
- }
17
- ]
 
 
 
2
  {
3
  "predictions": [0, 0],
4
  "references": [1, 1],
5
+ "result": {"mean_average_precision": 1.0},
6
  },
7
+ ]
8
+
9
+ from mean_average_precision import mean_average_precision
10
+
11
+ m = mean_average_precision()
12
+
13
+ for test_case in test_cases:
14
+ assert (
15
+ m.compute(
16
+ predictions=test_case["predictions"], references=test_case["references"]
17
+ )
18
+ == test_case["result"]
19
+ )