lvwerra (HF staff) committed
Commit fd5d98c
1 Parent(s): 86f8415

Update Space (evaluate main: e4a27243)

Files changed (3)
  1. README.md +7 -7
  2. requirements.txt +1 -1
  3. toxicity.py +36 -20
README.md CHANGED
@@ -30,7 +30,7 @@ The model should be compatible with the AutoModelForSequenceClassification class
 For more information, see [the AutoModelForSequenceClassification documentation]( https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForSequenceClassification).
 
 Args:
-    `predictions` (list of str): prediction/candidate sentences
+    `data` (list of str): prediction/candidate sentences
     `toxic_label` (str) (optional): the toxic label that you want to detect, depending on the labels that the model has been trained on.
     This can be found using the `id2label` function, e.g.:
     ```python
@@ -47,7 +47,7 @@ Args:
 
 ## Output values
 
-`toxicity`: a list of toxicity scores, one for each sentence in `predictions` (default behavior)
+`toxicity`: a list of toxicity scores, one for each sentence in `data` (default behavior)
 
 `max_toxicity`: the maximum toxicity over all scores (if `aggregation` = `maximum`)
 
@@ -62,7 +62,7 @@ Args:
 ```python
 >>> toxicity = evaluate.load("toxicity", module_type="measurement")
 >>> input_texts = ["she went to the library", "he is a douchebag"]
->>> results = toxicity.compute(predictions=input_texts)
+>>> results = toxicity.compute(data=input_texts)
 >>> print([round(s, 4) for s in results["toxicity"]])
 [0.0002, 0.8564]
 ```
@@ -70,7 +70,7 @@ Args:
 ```python
 >>> toxicity = evaluate.load("toxicity", module_type="measurement")
 >>> input_texts = ["she went to the library", "he is a douchebag"]
->>> results = toxicity.compute(predictions=input_texts, aggregation="ratio")
+>>> results = toxicity.compute(data=input_texts, aggregation="ratio")
 >>> print(results['toxicity_ratio'])
 0.5
 ```
@@ -78,15 +78,15 @@ Args:
 ```python
 >>> toxicity = evaluate.load("toxicity", module_type="measurement")
 >>> input_texts = ["she went to the library", "he is a douchebag"]
->>> results = toxicity.compute(predictions=input_texts, aggregation="maximum")
+>>> results = toxicity.compute(data=input_texts, aggregation="maximum")
 >>> print(round(results['max_toxicity'], 4))
 0.8564
 ```
 Example 4 (uses a custom model):
 ```python
->>> toxicity = evaluate.load("toxicity", 'DaNLP/da-electra-hatespeech-detection')
+>>> toxicity = evaluate.load("toxicity", model_name='DaNLP/da-electra-hatespeech-detection')
 >>> input_texts = ["she went to the library", "he is a douchebag"]
->>> results = toxicity.compute(predictions=input_texts, toxic_label='offensive')
+>>> results = toxicity.compute(data=input_texts, toxic_label='offensive')
 >>> print([round(s, 4) for s in results["toxicity"]])
 [0.0176, 0.0203]
 ```
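
As the updated Args section notes, `toxic_label` must match one of the labels the chosen checkpoint was trained with. A minimal sketch of how to look them up with `transformers`, using the DaNLP checkpoint from Example 4 (the printed mapping is illustrative):

```python
from transformers import AutoModelForSequenceClassification

# Load the classifier referenced in Example 4 and inspect its label mapping;
# the string passed as `toxic_label` must match one of these values.
model = AutoModelForSequenceClassification.from_pretrained("DaNLP/da-electra-hatespeech-detection")
print(model.config.id2label)  # e.g. {0: 'not offensive', 1: 'offensive'}
```
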
requirements.txt CHANGED
@@ -1,2 +1,2 @@
-git+https://github.com/huggingface/evaluate@80448674f5447a9682afe051db243c4a13bfe4ff
+git+https://github.com/huggingface/evaluate@e4a2724377909fe2aeb4357e3971e5a569673b39
 transformers
toxicity.py CHANGED
@@ -14,6 +14,8 @@
 
 """ Toxicity detection measurement. """
 
+from dataclasses import dataclass
+
 import datasets
 from transformers import pipeline
 
@@ -40,7 +42,7 @@ _KWARGS_DESCRIPTION = """
 Compute the toxicity of the input sentences.
 
 Args:
-    `predictions` (list of str): prediction/candidate sentences
+    `data` (list of str): prediction/candidate sentences
     `toxic_label` (str) (optional): the toxic label that you want to detect, depending on the labels that the model has been trained on.
     This can be found using the `id2label` function, e.g.:
         model = AutoModelForSequenceClassification.from_pretrained("DaNLP/da-electra-hatespeech-detection")
@@ -64,14 +66,14 @@ Examples:
     Example 1 (default behavior):
         >>> toxicity = evaluate.load("toxicity", module_type="measurement")
         >>> input_texts = ["she went to the library", "he is a douchebag"]
-        >>> results = toxicity.compute(predictions=input_texts)
+        >>> results = toxicity.compute(data=input_texts)
         >>> print([round(s, 4) for s in results["toxicity"]])
         [0.0002, 0.8564]
 
     Example 2 (returns ratio of toxic sentences):
         >>> toxicity = evaluate.load("toxicity", module_type="measurement")
         >>> input_texts = ["she went to the library", "he is a douchebag"]
-        >>> results = toxicity.compute(predictions=input_texts, aggregation="ratio")
+        >>> results = toxicity.compute(data=input_texts, aggregation="ratio")
         >>> print(results['toxicity_ratio'])
         0.5
 
@@ -79,15 +81,15 @@ Examples:
 
         >>> toxicity = evaluate.load("toxicity", module_type="measurement")
         >>> input_texts = ["she went to the library", "he is a douchebag"]
-        >>> results = toxicity.compute(predictions=input_texts, aggregation="maximum")
+        >>> results = toxicity.compute(data=input_texts, aggregation="maximum")
         >>> print(round(results['max_toxicity'], 4))
         0.8564
 
     Example 4 (uses a custom model):
 
-        >>> toxicity = evaluate.load("toxicity", 'DaNLP/da-electra-hatespeech-detection')
+        >>> toxicity = evaluate.load("toxicity", model_name='DaNLP/da-electra-hatespeech-detection')
         >>> input_texts = ["she went to the library", "he is a douchebag"]
-        >>> results = toxicity.compute(predictions=input_texts, toxic_label='offensive')
+        >>> results = toxicity.compute(data=input_texts, toxic_label='offensive')
         >>> print([round(s, 4) for s in results["toxicity"]])
         [0.0176, 0.0203]
 """
@@ -106,17 +108,34 @@ def toxicity(preds, toxic_classifier, toxic_label):
     return toxic_scores
 
 
+@dataclass
+@dataclass
+class ToxicityConfig(evaluate.info.Config):
+
+    name: str = "default"
+
+    model_name: str = "facebook/roberta-hate-speech-dynabench-r4-target"
+    aggregation: str = "all"
+    toxic_label: str = "hate"
+    threshold: float = 0.5
+
+
 @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
 class Toxicity(evaluate.Measurement):
-    def _info(self):
+
+    CONFIG_CLASS = ToxicityConfig
+    ALLOWED_CONFIG_NAMES = ["default"]
+
+    def _info(self, config):
         return evaluate.MeasurementInfo(
             module_type="measurement",
             description=_DESCRIPTION,
             citation=_CITATION,
             inputs_description=_KWARGS_DESCRIPTION,
+            config=config,
             features=datasets.Features(
                 {
-                    "predictions": datasets.Value("string", id="sequence"),
+                    "data": datasets.Value("string", id="sequence"),
                 }
             ),
             codebase_urls=[],
@@ -124,18 +143,15 @@ class Toxicity(evaluate.Measurement):
         )
 
     def _download_and_prepare(self, dl_manager):
-        if self.config_name == "default":
-            logger.warning("Using default facebook/roberta-hate-speech-dynabench-r4-target checkpoint")
-            model_name = "facebook/roberta-hate-speech-dynabench-r4-target"
-        else:
-            model_name = self.config_name
-        self.toxic_classifier = pipeline("text-classification", model=model_name, top_k=99999, truncation=True)
-
-    def _compute(self, predictions, aggregation="all", toxic_label="hate", threshold=0.5):
-        scores = toxicity(predictions, self.toxic_classifier, toxic_label)
-        if aggregation == "ratio":
-            return {"toxicity_ratio": sum(i >= threshold for i in scores) / len(scores)}
-        elif aggregation == "maximum":
+        self.toxic_classifier = pipeline(
+            "text-classification", model=self.config.model_name, top_k=99999, truncation=True
+        )
+
+    def _compute(self, data):
+        scores = toxicity(data, self.toxic_classifier, self.config.toxic_label)
+        if self.config.aggregation == "ratio":
+            return {"toxicity_ratio": sum(i >= self.config.threshold for i in scores) / len(scores)}
+        elif self.config.aggregation == "maximum":
             return {"max_toxicity": max(scores)}
         else:
             return {"toxicity": scores}