Datasets:
Sub-tasks:
hate-speech-detection
Languages:
English
Size:
100K<n<1M
Tags:
explanation-generation
License:
Update files from the datasets library (from 1.2.0)
Browse files
Release notes: https://github.com/huggingface/datasets/releases/tag/1.2.0
- dataset_infos.json +1 -1
- dummy/0.0.0/dummy_data.zip +2 -2
- social_bias_frames.py +5 -4
dataset_infos.json
CHANGED
@@ -1 +1 @@
|
|
1 |
-
{"default": {"description": "Social Bias Frames is a new way of representing the biases and offensiveness that are implied in language
|
|
|
1 |
+
{"default": {"description": "Social Bias Frames is a new way of representing the biases and offensiveness that are implied in language.\nFor example, these frames are meant to distill the implication that \"women (candidates) are less qualified\"\nbehind the statement \"we shouldn\u2019t lower our standards to hire more women.\"\n", "citation": "@inproceedings{sap2020socialbiasframes,\n title={Social Bias Frames: Reasoning about Social and Power Implications of Language},\n author={Sap, Maarten and Gabriel, Saadia and Qin, Lianhui and Jurafsky, Dan and Smith, Noah A and Choi, Yejin},\n year={2020},\n booktitle={ACL},\n}\n", "homepage": "https://homes.cs.washington.edu/~msap/social-bias-frames/", "license": "", "features": {"whoTarget": {"dtype": "string", "id": null, "_type": "Value"}, "intentYN": {"dtype": "string", "id": null, "_type": "Value"}, "sexYN": {"dtype": "string", "id": null, "_type": "Value"}, "sexReason": {"dtype": "string", "id": null, "_type": "Value"}, "offensiveYN": {"dtype": "string", "id": null, "_type": "Value"}, "annotatorGender": {"dtype": "string", "id": null, "_type": "Value"}, "annotatorMinority": {"dtype": "string", "id": null, "_type": "Value"}, "sexPhrase": {"dtype": "string", "id": null, "_type": "Value"}, "speakerMinorityYN": {"dtype": "string", "id": null, "_type": "Value"}, "WorkerId": {"dtype": "string", "id": null, "_type": "Value"}, "HITId": {"dtype": "string", "id": null, "_type": "Value"}, "annotatorPolitics": {"dtype": "string", "id": null, "_type": "Value"}, "annotatorRace": {"dtype": "string", "id": null, "_type": "Value"}, "annotatorAge": {"dtype": "string", "id": null, "_type": "Value"}, "post": {"dtype": "string", "id": null, "_type": "Value"}, "targetMinority": {"dtype": "string", "id": null, "_type": "Value"}, "targetCategory": {"dtype": "string", "id": null, "_type": "Value"}, "targetStereotype": {"dtype": "string", "id": null, "_type": "Value"}, "dataSource": {"dtype": "string", "id": null, "_type": "Value"}}, 
"post_processed": null, "supervised_keys": null, "builder_name": "social_bias_frames", "config_name": "default", "version": {"version_str": "0.0.0", "description": null, "major": 0, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 5371665, "num_examples": 17501, "dataset_name": "social_bias_frames"}, "validation": {"name": "validation", "num_bytes": 5096009, "num_examples": 16738, "dataset_name": "social_bias_frames"}, "train": {"name": "train", "num_bytes": 34006886, "num_examples": 112900, "dataset_name": "social_bias_frames"}}, "download_checksums": {"https://homes.cs.washington.edu/~msap/social-bias-frames/SBIC.v2.tgz": {"num_bytes": 6326977, "checksum": "6f7ecfbdf4f3d4f030496665a5371dba5c7c45fcfed32c14b7e225a0de40d7bb"}}, "download_size": 6326977, "post_processing_size": null, "dataset_size": 44474560, "size_in_bytes": 50801537}}
|
dummy/0.0.0/dummy_data.zip
CHANGED
@@ -1,3 +1,3 @@
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:
|
3 |
-
size
|
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:5b2ff486df1d229a596aac518e89660ba4d14ddc9a3df05bbeb00b4f67f8b38c
|
3 |
+
size 1923
|
social_bias_frames.py
CHANGED
@@ -39,7 +39,7 @@ For example, these frames are meant to distill the implication that "women (cand
|
|
39 |
behind the statement "we shouldn’t lower our standards to hire more women."
|
40 |
"""
|
41 |
|
42 |
-
_DATA_URL = "https://homes.cs.washington.edu/~msap/social-bias-frames/
|
43 |
|
44 |
|
45 |
class SocialBiasFrames(datasets.GeneratorBasedBuilder):
|
@@ -68,6 +68,7 @@ class SocialBiasFrames(datasets.GeneratorBasedBuilder):
|
|
68 |
"targetMinority": datasets.Value("string"),
|
69 |
"targetCategory": datasets.Value("string"),
|
70 |
"targetStereotype": datasets.Value("string"),
|
|
|
71 |
}
|
72 |
),
|
73 |
# No default supervised_keys (as we have to pass both premise
|
@@ -81,13 +82,13 @@ class SocialBiasFrames(datasets.GeneratorBasedBuilder):
|
|
81 |
dl_dir = dl_manager.download_and_extract(_DATA_URL)
|
82 |
return [
|
83 |
datasets.SplitGenerator(
|
84 |
-
name=datasets.Split.TEST, gen_kwargs={"filepath": os.path.join(dl_dir, "
|
85 |
),
|
86 |
datasets.SplitGenerator(
|
87 |
-
name=datasets.Split.VALIDATION, gen_kwargs={"filepath": os.path.join(dl_dir, "
|
88 |
),
|
89 |
datasets.SplitGenerator(
|
90 |
-
name=datasets.Split.TRAIN, gen_kwargs={"filepath": os.path.join(dl_dir, "
|
91 |
),
|
92 |
]
|
93 |
|
|
|
39 |
behind the statement "we shouldn’t lower our standards to hire more women."
|
40 |
"""
|
41 |
|
42 |
+
_DATA_URL = "https://homes.cs.washington.edu/~msap/social-bias-frames/SBIC.v2.tgz"
|
43 |
|
44 |
|
45 |
class SocialBiasFrames(datasets.GeneratorBasedBuilder):
|
|
|
68 |
"targetMinority": datasets.Value("string"),
|
69 |
"targetCategory": datasets.Value("string"),
|
70 |
"targetStereotype": datasets.Value("string"),
|
71 |
+
"dataSource": datasets.Value("string"),
|
72 |
}
|
73 |
),
|
74 |
# No default supervised_keys (as we have to pass both premise
|
|
|
82 |
dl_dir = dl_manager.download_and_extract(_DATA_URL)
|
83 |
return [
|
84 |
datasets.SplitGenerator(
|
85 |
+
name=datasets.Split.TEST, gen_kwargs={"filepath": os.path.join(dl_dir, "SBIC.v2.tst.csv")}
|
86 |
),
|
87 |
datasets.SplitGenerator(
|
88 |
+
name=datasets.Split.VALIDATION, gen_kwargs={"filepath": os.path.join(dl_dir, "SBIC.v2.dev.csv")}
|
89 |
),
|
90 |
datasets.SplitGenerator(
|
91 |
+
name=datasets.Split.TRAIN, gen_kwargs={"filepath": os.path.join(dl_dir, "SBIC.v2.trn.csv")}
|
92 |
),
|
93 |
]
|
94 |
|