lvwerra HF staff committed on
Commit
bffd289
1 Parent(s): 551fef4

Update Space (evaluate main: c447fc8e)

Browse files
Files changed (2) hide show
  1. pearsonr.py +3 -18
  2. requirements.txt +1 -1
pearsonr.py CHANGED
@@ -13,8 +13,6 @@
13
  # limitations under the License.
14
  """Pearson correlation coefficient metric."""
15
 
16
- from dataclasses import dataclass
17
-
18
  import datasets
19
  from scipy.stats import pearsonr
20
 
@@ -85,26 +83,13 @@ doi = {10.1038/s41592-019-0686-2},
85
  """
86
 
87
 
88
- @dataclass
89
- class PearsonrConfig(evaluate.info.Config):
90
-
91
- name: str = "default"
92
-
93
- return_pvalue: bool = True
94
-
95
-
96
  @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
97
  class Pearsonr(evaluate.Metric):
98
-
99
- CONFIG_CLASS = PearsonrConfig
100
- ALLOWED_CONFIG_NAMES = ["default"]
101
-
102
- def _info(self, config):
103
  return evaluate.MetricInfo(
104
  description=_DESCRIPTION,
105
  citation=_CITATION,
106
  inputs_description=_KWARGS_DESCRIPTION,
107
- config=config,
108
  features=datasets.Features(
109
  {
110
  "predictions": datasets.Value("float"),
@@ -114,8 +99,8 @@ class Pearsonr(evaluate.Metric):
114
  reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
115
  )
116
 
117
- def _compute(self, predictions, references):
118
- if self.config.return_pvalue:
119
  results = pearsonr(references, predictions)
120
  return {"pearsonr": results[0], "p-value": results[1]}
121
  else:
 
13
  # limitations under the License.
14
  """Pearson correlation coefficient metric."""
15
 
 
 
16
  import datasets
17
  from scipy.stats import pearsonr
18
 
 
83
  """
84
 
85
 
 
 
 
 
 
 
 
 
86
  @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
87
  class Pearsonr(evaluate.Metric):
88
+ def _info(self):
 
 
 
 
89
  return evaluate.MetricInfo(
90
  description=_DESCRIPTION,
91
  citation=_CITATION,
92
  inputs_description=_KWARGS_DESCRIPTION,
 
93
  features=datasets.Features(
94
  {
95
  "predictions": datasets.Value("float"),
 
99
  reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
100
  )
101
 
102
+ def _compute(self, predictions, references, return_pvalue=False):
103
+ if return_pvalue:
104
  results = pearsonr(references, predictions)
105
  return {"pearsonr": results[0], "p-value": results[1]}
106
  else:
requirements.txt CHANGED
@@ -1,2 +1,2 @@
1
- git+https://github.com/huggingface/evaluate@e4a2724377909fe2aeb4357e3971e5a569673b39
2
  scipy
 
1
+ git+https://github.com/huggingface/evaluate@c447fc8eda9c62af501bfdc6988919571050d950
2
  scipy