Vlasta committed
Commit 1d7fac4
1 Parent(s): b377cc7

Fix parameter doc

Files changed (1):
1. pr_auc.py +4 -17
pr_auc.py CHANGED
@@ -18,7 +18,6 @@ import datasets
 from sklearn.metrics import precision_recall_curve, auc
 
 
-# TODO: Add BibTeX citation
 _CITATION = """\
 @InProceedings{huggingface:module,
 title = {A great new module},
@@ -27,7 +26,6 @@ year={2020}
 }
 """
 
-# TODO: Add description of the module here
 _DESCRIPTION = """\
 Computes the area under precision-recall curve. Implementation details taken from https://sinyi-chou.github.io/python-sklearn-precision-recall/
 """
@@ -37,31 +35,20 @@ Computes the area under precision-recall curve. Implementation details taken fro
 _KWARGS_DESCRIPTION = """
 Calculates how good are predictions given some references, using certain scores
 Args:
-    predictions: list of predictions to score. Each predictions
-        should be a string with tokens separated by spaces.
+    prediction_scores: Model predictions
     references: list of reference for each prediction. Each
         reference should be a string with tokens separated by spaces.
 Returns:
-    accuracy: description of the first score,
-    another_score: description of the second score,
+    pr_auc: area under the precision-recall curve,
 Examples:
-    Examples should be written in doctest format, and should illustrate how
-    to use the function.
-
-    >>> my_new_module = evaluate.load("my_new_module")
-    >>> results = my_new_module.compute(references=[0, 1], predictions=[0, 1])
-    >>> print(results)
-    {'accuracy': 1.0}
+    No examples
 """
 
-# TODO: Define external resources urls if needed
-BAD_WORDS_URL = "http://url/to/external/resource/bad_words.txt"
+BAD_WORDS_URL = ""
 
 
 @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
 class PRAUC(evaluate.Metric):
-    """TODO: Short description of my evaluation module."""
-
     def _info(self):
         # TODO: Specifies the evaluate.EvaluationModuleInfo object
         return evaluate.MetricInfo(
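For context, the computation this docstring now describes can be sketched with the two sklearn imports visible at the top of the diff. The module's _compute body is outside this commit's hunks, so the snippet below is only an illustration of the documented interface (prediction_scores and references in, pr_auc out), not the module's confirmed implementation:

    # Hedged sketch: PR-AUC from model scores and binary labels, assuming the
    # interface documented in _KWARGS_DESCRIPTION above.
    from sklearn.metrics import precision_recall_curve, auc

    references = [0, 0, 1, 1]                  # ground-truth binary labels
    prediction_scores = [0.1, 0.4, 0.35, 0.8]  # model scores, not hard labels

    # precision_recall_curve evaluates precision/recall at every score threshold;
    # auc then integrates precision over recall with the trapezoidal rule.
    precision, recall, _ = precision_recall_curve(references, prediction_scores)
    pr_auc = auc(recall, precision)
    print({"pr_auc": pr_auc})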
 
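And a usage example in the style of the doctest the old template removed; the load path "Vlasta/pr_auc" is an assumption inferred from the committer and filename, not something this diff confirms:

    import evaluate

    # Hypothetical hub path; substitute the module's actual repo id.
    pr_auc_metric = evaluate.load("Vlasta/pr_auc")
    results = pr_auc_metric.compute(
        references=[0, 0, 1, 1],
        prediction_scores=[0.1, 0.4, 0.35, 0.8],
    )
    print(results)  # e.g. {'pr_auc': ...}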