lvwerra (HF staff) committed
Commit
6381dc5
1 Parent(s): 51c4792

Update Space (evaluate main: e4a27243)

Files changed (2):
  1. regard.py +19 -6
  2. requirements.txt +1 -1
regard.py CHANGED
@@ -15,8 +15,10 @@
 """ Regard measurement. """
 
 from collections import defaultdict
+from dataclasses import dataclass
 from operator import itemgetter
 from statistics import mean
+from typing import Optional
 
 import datasets
 from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline
@@ -115,9 +117,20 @@ def regard(group, regard_classifier):
     return group_regard, dict(group_scores)
 
 
+@dataclass
+class RegardConfig(evaluate.info.Config):
+
+    name: str = "default"
+
+    aggregation: Optional[str] = None
+
+
 @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
 class Regard(evaluate.Measurement):
-    def _info(self):
+    CONFIG_CLASS = RegardConfig
+    ALLOWED_CONFIG_NAMES = ["default", "compare"]
+
+    def _info(self, config):
         if self.config_name not in ["compare", "default"]:
             raise KeyError("You should supply a configuration name selected in " '["config", "default"]')
         return evaluate.MeasurementInfo(
@@ -125,6 +138,7 @@ class Regard(evaluate.Measurement):
             description=_DESCRIPTION,
             citation=_CITATION,
             inputs_description=_KWARGS_DESCRIPTION,
+            config=config,
             features=datasets.Features(
                 {
                     "data": datasets.Value("string", id="sequence"),
@@ -150,7 +164,6 @@ class Regard(evaluate.Measurement):
         self,
         data,
         references=None,
-        aggregation=None,
     ):
         if self.config_name == "compare":
             pred_scores, pred_regard = regard(data, self.regard_classifier)
@@ -159,12 +172,12 @@ class Regard(evaluate.Measurement):
             pred_max = {k: max(v) for k, v in pred_regard.items()}
             ref_mean = {k: mean(v) for k, v in ref_regard.items()}
             ref_max = {k: max(v) for k, v in ref_regard.items()}
-            if aggregation == "maximum":
+            if self.config.aggregation == "maximum":
                 return {
                     "max_data_regard": pred_max,
                     "max_references_regard": ref_max,
                 }
-            elif aggregation == "average":
+            elif self.config.aggregation == "average":
                 return {"average_data_regard": pred_mean, "average_references_regard": ref_mean}
             else:
                 return {"regard_difference": {key: pred_mean[key] - ref_mean.get(key, 0) for key in pred_mean}}
@@ -172,9 +185,9 @@ class Regard(evaluate.Measurement):
         pred_scores, pred_regard = regard(data, self.regard_classifier)
         pred_mean = {k: mean(v) for k, v in pred_regard.items()}
         pred_max = {k: max(v) for k, v in pred_regard.items()}
-        if aggregation == "maximum":
+        if self.config.aggregation == "maximum":
             return {"max_regard": pred_max}
-        elif aggregation == "average":
+        elif self.config.aggregation == "average":
             return {"average_regard": pred_mean}
         else:
             return {"regard": pred_scores}
requirements.txt CHANGED
@@ -1,2 +1,2 @@
-git+https://github.com/huggingface/evaluate.git@80448674f5447a9682afe051db243c4a13bfe4ff
+git+https://github.com/huggingface/evaluate.git@e4a2724377909fe2aeb4357e3971e5a569673b39
 transformers
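Note that the new pin, e4a2724377909fe2aeb4357e3971e5a569673b39, is the full hash of the evaluate main commit abbreviated as e4a27243 in the commit message above, so the Space now tracks the library revision that introduced this config mechanism.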