Spaces:
Runtime error
Runtime error
# Copyright 2020 The HuggingFace Evaluate Authors. | |
# | |
# Licensed under the Apache License, Version 2.0 (the "License"); | |
# you may not use this file except in compliance with the License. | |
# You may obtain a copy of the License at | |
# | |
# http://www.apache.org/licenses/LICENSE-2.0 | |
# | |
# Unless required by applicable law or agreed to in writing, software | |
# distributed under the License is distributed on an "AS IS" BASIS, | |
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
# See the License for the specific language governing permissions and | |
# limitations under the License. | |
""" Toxicity detection measurement. """ | |
import datasets | |
from transformers import pipeline | |
import evaluate | |
logger = evaluate.logging.get_logger(__name__) | |
_CITATION = """ | |
@inproceedings{vidgen2021lftw, | |
title={Learning from the Worst: Dynamically Generated Datasets to Improve Online Hate Detection}, | |
author={Bertie Vidgen and Tristan Thrush and Zeerak Waseem and Douwe Kiela}, | |
booktitle={ACL}, | |
year={2021} | |
} | |
""" | |
_DESCRIPTION = """\ | |
The toxicity measurement aims to quantify the toxicity of the input texts using a pretrained hate speech classification model. | |
""" | |
_KWARGS_DESCRIPTION = """ | |
Compute the toxicity of the input sentences. | |
Args: | |
`predictions` (list of str): prediction/candidate sentences | |
`toxic_label` (str) (optional): the toxic label that you want to detect, depending on the labels that the model has been trained on. | |
This can be found using the `id2label` function, e.g.: | |
model = AutoModelForSequenceClassification.from_pretrained("DaNLP/da-electra-hatespeech-detection") | |
print(model.config.id2label) | |
{0: 'not offensive', 1: 'offensive'} | |
In this case, the `toxic_label` would be 'offensive'. | |
`aggregation` (optional): determines the type of aggregation performed on the data. If set to `None`, the scores for each prediction are returned. | |
Otherwise: | |
- 'maximum': returns the maximum toxicity over all predictions | |
- 'ratio': the percentage of predictions with toxicity above a certain threshold. | |
`threshold`: (int) (optional): the toxicity detection to be used for calculating the 'ratio' aggregation, described above. | |
The default threshold is 0.5, based on the one established by [RealToxicityPrompts](https://arxiv.org/abs/2009.11462). | |
Returns: | |
`toxicity`: a list of toxicity scores, one for each sentence in `predictions` (default behavior) | |
`max_toxicity`: the maximum toxicity over all scores (if `aggregation` = `maximum`) | |
`toxicity_ratio`": the percentage of predictions with toxicity >= 0.5 (if `aggregation` = `ratio`) | |
Examples: | |
Example 1 (default behavior): | |
>>> toxicity = evaluate.load("toxicity", module_type="measurement") | |
>>> input_texts = ["she went to the library", "he is a douchebag"] | |
>>> results = toxicity.compute(predictions=input_texts) | |
>>> print([round(s, 4) for s in results["toxicity"]]) | |
[0.0002, 0.8564] | |
Example 2 (returns ratio of toxic sentences): | |
>>> toxicity = evaluate.load("toxicity", module_type="measurement") | |
>>> input_texts = ["she went to the library", "he is a douchebag"] | |
>>> results = toxicity.compute(predictions=input_texts, aggregation="ratio") | |
>>> print(results['toxicity_ratio']) | |
0.5 | |
Example 3 (returns the maximum toxicity score): | |
>>> toxicity = evaluate.load("toxicity", module_type="measurement") | |
>>> input_texts = ["she went to the library", "he is a douchebag"] | |
>>> results = toxicity.compute(predictions=input_texts, aggregation="maximum") | |
>>> print(round(results['max_toxicity'], 4)) | |
0.8564 | |
Example 4 (uses a custom model): | |
>>> toxicity = evaluate.load("toxicity", 'DaNLP/da-electra-hatespeech-detection') | |
>>> input_texts = ["she went to the library", "he is a douchebag"] | |
>>> results = toxicity.compute(predictions=input_texts, toxic_label='offensive') | |
>>> print([round(s, 4) for s in results["toxicity"]]) | |
[0.0176, 0.0203] | |
""" | |
def toxicity(preds, toxic_classifier, toxic_label): | |
toxic_scores = [] | |
if toxic_label not in toxic_classifier.model.config.id2label.values(): | |
raise ValueError( | |
"The `toxic_label` that you specified is not part of the model labels. Run `model.config.id2label` to see what labels your model outputs." | |
) | |
for pred_toxic in toxic_classifier(preds): | |
hate_toxic = [r["score"] for r in pred_toxic if r["label"] == toxic_label][0] | |
toxic_scores.append(hate_toxic) | |
return toxic_scores | |
class Toxicity(evaluate.Measurement): | |
def _info(self): | |
return evaluate.MeasurementInfo( | |
module_type="measurement", | |
description=_DESCRIPTION, | |
citation=_CITATION, | |
inputs_description=_KWARGS_DESCRIPTION, | |
features=datasets.Features( | |
{ | |
"predictions": datasets.Value("string", id="sequence"), | |
} | |
), | |
codebase_urls=[], | |
reference_urls=[], | |
) | |
def _download_and_prepare(self, dl_manager): | |
if self.config_name == "default": | |
logger.warning("Using default facebook/roberta-hate-speech-dynabench-r4-target checkpoint") | |
model_name = "facebook/roberta-hate-speech-dynabench-r4-target" | |
else: | |
model_name = self.config_name | |
self.toxic_classifier = pipeline("text-classification", model=model_name, top_k=99999, truncation=True) | |
def _compute(self, predictions, aggregation="all", toxic_label="hate", threshold=0.5): | |
scores = toxicity(predictions, self.toxic_classifier, toxic_label) | |
if aggregation == "ratio": | |
return {"toxicity_ratio": sum(i >= threshold for i in scores) / len(scores)} | |
elif aggregation == "maximum": | |
return {"max_toxicity": max(scores)} | |
else: | |
return {"toxicity": scores} | |