Datasets: social_bias_frames
Sub-tasks: hate-speech-detection
Languages: English
Size: 100K<n<1M
Tags: explanation-generation
License: unknown
Update files from the datasets library (from 1.16.0)
Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0
- README.md +1 -0
- dataset_infos.json +1 -1
- social_bias_frames.py +15 -10
README.md
CHANGED
@@ -1,4 +1,5 @@
 ---
+pretty_name: Social Bias Frames
 annotations_creators:
 - crowdsourced
 language_creators:
dataset_infos.json
CHANGED
@@ -1 +1 @@
-{"default": {"description": "Social Bias Frames is a new way of representing the biases and offensiveness that are implied in language.\nFor example, these frames are meant to distill the implication that \"women (candidates) are less qualified\"\nbehind the statement \"we shouldn\u2019t lower our standards to hire more women.\"\n", "citation": "@inproceedings{sap2020socialbiasframes,\n title={Social Bias Frames: Reasoning about Social and Power Implications of Language},\n author={Sap, Maarten and Gabriel, Saadia and Qin, Lianhui and Jurafsky, Dan and Smith, Noah A and Choi, Yejin},\n year={2020},\n booktitle={ACL},\n}\n", "homepage": "https://homes.cs.washington.edu/~msap/social-bias-frames/", "license": "", "features": {"whoTarget": {"dtype": "string", "id": null, "_type": "Value"}, "intentYN": {"dtype": "string", "id": null, "_type": "Value"}, "sexYN": {"dtype": "string", "id": null, "_type": "Value"}, "sexReason": {"dtype": "string", "id": null, "_type": "Value"}, "offensiveYN": {"dtype": "string", "id": null, "_type": "Value"}, "annotatorGender": {"dtype": "string", "id": null, "_type": "Value"}, "annotatorMinority": {"dtype": "string", "id": null, "_type": "Value"}, "sexPhrase": {"dtype": "string", "id": null, "_type": "Value"}, "speakerMinorityYN": {"dtype": "string", "id": null, "_type": "Value"}, "WorkerId": {"dtype": "string", "id": null, "_type": "Value"}, "HITId": {"dtype": "string", "id": null, "_type": "Value"}, "annotatorPolitics": {"dtype": "string", "id": null, "_type": "Value"}, "annotatorRace": {"dtype": "string", "id": null, "_type": "Value"}, "annotatorAge": {"dtype": "string", "id": null, "_type": "Value"}, "post": {"dtype": "string", "id": null, "_type": "Value"}, "targetMinority": {"dtype": "string", "id": null, "_type": "Value"}, "targetCategory": {"dtype": "string", "id": null, "_type": "Value"}, "targetStereotype": {"dtype": "string", "id": null, "_type": "Value"}, "dataSource": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "social_bias_frames", "config_name": "default", "version": {"version_str": "0.0.0", "description": null, "major": 0, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 5371665, "num_examples": 17501, "dataset_name": "social_bias_frames"}, "validation": {"name": "validation", "num_bytes": 5096009, "num_examples": 16738, "dataset_name": "social_bias_frames"}, "train": {"name": "train", "num_bytes": 34006886, "num_examples": 112900, "dataset_name": "social_bias_frames"}}, "download_checksums": {"https://homes.cs.washington.edu/~msap/social-bias-frames/SBIC.v2.tgz": {"num_bytes": 9464583, "checksum": "07cabae1ee0289392ea11b494c0012578bd39a582d58dc5ebd8edc3deda6bc5b"}}, "download_size": 9464583, "post_processing_size": null, "dataset_size": 44474560, "size_in_bytes": 53939143}}
+{"default": {"description": "Social Bias Frames is a new way of representing the biases and offensiveness that are implied in language.\nFor example, these frames are meant to distill the implication that \"women (candidates) are less qualified\"\nbehind the statement \"we shouldn\u2019t lower our standards to hire more women.\"\n", "citation": "@inproceedings{sap2020socialbiasframes,\n title={Social Bias Frames: Reasoning about Social and Power Implications of Language},\n author={Sap, Maarten and Gabriel, Saadia and Qin, Lianhui and Jurafsky, Dan and Smith, Noah A and Choi, Yejin},\n year={2020},\n booktitle={ACL},\n}\n", "homepage": "https://homes.cs.washington.edu/~msap/social-bias-frames/", "license": "", "features": {"whoTarget": {"dtype": "string", "id": null, "_type": "Value"}, "intentYN": {"dtype": "string", "id": null, "_type": "Value"}, "sexYN": {"dtype": "string", "id": null, "_type": "Value"}, "sexReason": {"dtype": "string", "id": null, "_type": "Value"}, "offensiveYN": {"dtype": "string", "id": null, "_type": "Value"}, "annotatorGender": {"dtype": "string", "id": null, "_type": "Value"}, "annotatorMinority": {"dtype": "string", "id": null, "_type": "Value"}, "sexPhrase": {"dtype": "string", "id": null, "_type": "Value"}, "speakerMinorityYN": {"dtype": "string", "id": null, "_type": "Value"}, "WorkerId": {"dtype": "string", "id": null, "_type": "Value"}, "HITId": {"dtype": "string", "id": null, "_type": "Value"}, "annotatorPolitics": {"dtype": "string", "id": null, "_type": "Value"}, "annotatorRace": {"dtype": "string", "id": null, "_type": "Value"}, "annotatorAge": {"dtype": "string", "id": null, "_type": "Value"}, "post": {"dtype": "string", "id": null, "_type": "Value"}, "targetMinority": {"dtype": "string", "id": null, "_type": "Value"}, "targetCategory": {"dtype": "string", "id": null, "_type": "Value"}, "targetStereotype": {"dtype": "string", "id": null, "_type": "Value"}, "dataSource": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "social_bias_frames", "config_name": "default", "version": {"version_str": "0.0.0", "description": null, "major": 0, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 5371665, "num_examples": 17501, "dataset_name": "social_bias_frames"}, "validation": {"name": "validation", "num_bytes": 5096009, "num_examples": 16738, "dataset_name": "social_bias_frames"}, "train": {"name": "train", "num_bytes": 34006886, "num_examples": 112900, "dataset_name": "social_bias_frames"}}, "download_checksums": {"https://homes.cs.washington.edu/~msap/social-bias-frames/SBIC.v2.tgz": {"num_bytes": 9464583, "checksum": "07cabae1ee0289392ea11b494c0012578bd39a582d58dc5ebd8edc3deda6bc5b"}}, "download_size": 9464583, "post_processing_size": null, "dataset_size": 44474560, "size_in_bytes": 53939143}}
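Everything recorded in this metadata file can be inspected programmatically before downloading any data. A minimal sketch, assuming a standard datasets (>=1.16.0) install; load_dataset_builder resolves only the loading script and this metadata, not the 9.4 MB archive:

from datasets import load_dataset_builder

# Fetches the loading script plus dataset_infos.json; no data download yet.
builder = load_dataset_builder("social_bias_frames")

print(builder.info.features)       # 19 string-valued fields: post, offensiveYN, targetStereotype, ...
print(builder.info.splits)         # train 112900 / validation 16738 / test 17501 examples
print(builder.info.download_size)  # 9464583 bytes (SBIC.v2.tgz)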
social_bias_frames.py
CHANGED
@@ -18,7 +18,6 @@
 
 
 import csv
-import os
 
 import datasets
 
@@ -78,22 +77,28 @@ class SocialBiasFrames(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-        dl_dir = dl_manager.download_and_extract(_DATA_URL)
+        archive = dl_manager.download(_DATA_URL)
         return [
             datasets.SplitGenerator(
-                name=datasets.Split.TEST, gen_kwargs={"filepath": os.path.join(dl_dir, "SBIC.v2.tst.csv")}
+                name=datasets.Split.TEST,
+                gen_kwargs={"filepath": "SBIC.v2.tst.csv", "files": dl_manager.iter_archive(archive)},
             ),
             datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION, gen_kwargs={"filepath": os.path.join(dl_dir, "SBIC.v2.dev.csv")}
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={"filepath": "SBIC.v2.dev.csv", "files": dl_manager.iter_archive(archive)},
             ),
             datasets.SplitGenerator(
-                name=datasets.Split.TRAIN, gen_kwargs={"filepath": os.path.join(dl_dir, "SBIC.v2.trn.csv")}
+                name=datasets.Split.TRAIN,
+                gen_kwargs={"filepath": "SBIC.v2.trn.csv", "files": dl_manager.iter_archive(archive)},
             ),
         ]
 
-    def _generate_examples(self, filepath):
+    def _generate_examples(self, filepath, files):
         """This function returns the examples in the raw (text) form."""
-        with open(filepath, encoding="utf-8") as f:
-            reader = csv.DictReader(f)
-            for idx, row in enumerate(reader):
-                yield idx, row
+        for path, f in files:
+            if path == filepath:
+                lines = (line.decode("utf-8") for line in f)
+                reader = csv.DictReader(lines)
+                for idx, row in enumerate(reader):
+                    yield idx, row
+                break
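Swapping download_and_extract plus open for download plus iter_archive means the three CSVs are now read directly out of SBIC.v2.tgz, which is what makes this dataset streamable as of datasets 1.16.0. A minimal usage sketch (field names taken from the features listed in dataset_infos.json above):

from datasets import load_dataset

# Classic mode: downloads and caches the archive, then materializes all rows.
train = load_dataset("social_bias_frames", split="train")
print(train[0]["post"], train[0]["offensiveYN"])

# Streaming mode: rows are decoded on the fly via iter_archive, with no
# extraction step and no full download up front.
stream = load_dataset("social_bias_frames", split="train", streaming=True)
print(next(iter(stream))["targetStereotype"])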