albertvillanova HF staff committed on
Commit
dbe7278
1 Parent(s): 1cf017b

Delete legacy JSON metadata (#3)

Browse files

- Delete legacy JSON metadata (2cbde74ef8bd8da9d4b8f18cae265c26eb3093ee)

Files changed (1) hide show
  1. dataset_infos.json +0 -1
dataset_infos.json DELETED
@@ -1 +0,0 @@
1
- {"Coarse_Grained": {"description": " The Roman Urdu Hate-Speech and Offensive Language Detection (RUHSOLD) dataset is a Roman Urdu dataset of tweets annotated by experts in the relevant language. The authors develop the gold-standard for two sub-tasks. First sub-task is based on binary labels of Hate-Offensive content and Normal content (i.e., inoffensive language). These labels are self-explanatory. The authors refer to this sub-task as coarse-grained classification. Second sub-task defines Hate-Offensive content with four labels at a granular level. These labels are the most relevant for the demographic of users who converse in RU and are defined in related literature. The authors refer to this sub-task as fine-grained classification. The objective behind creating two gold-standards is to enable the researchers to evaluate the hate speech detection approaches on both easier (coarse-grained) and challenging (fine-grained) scenarios. ", "citation": "@inproceedings{rizwan2020hate,\n title={Hate-speech and offensive language detection in roman Urdu},\n author={Rizwan, Hammad and Shakeel, Muhammad Haroon and Karim, Asim},\n booktitle={Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)},\n pages={2512--2522},\n year={2020}\n}\n", "homepage": "https://github.com/haroonshakeel/roman_urdu_hate_speech", "license": "MIT License", "features": {"tweet": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["Abusive/Offensive", "Normal"], "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "task_templates": [{"task": "text-classification", "text_column": "tweet", "label_column": "label"}], "builder_name": "roman_urdu_hate_speech", "config_name": "Coarse_Grained", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 725719, "num_examples": 7208, "dataset_name": 
"roman_urdu_hate_speech"}, "test": {"name": "test", "num_bytes": 218087, "num_examples": 2002, "dataset_name": "roman_urdu_hate_speech"}, "validation": {"name": "validation", "num_bytes": 79759, "num_examples": 800, "dataset_name": "roman_urdu_hate_speech"}}, "download_checksums": {"https://raw.githubusercontent.com/haroonshakeel/roman_urdu_hate_speech/main/task_1_train.tsv": {"num_bytes": 668097, "checksum": "6236116609a80aaf6b9c7fab8f8d236b148d4638c6255a178c0d79d7766aa3b4"}, "https://raw.githubusercontent.com/haroonshakeel/roman_urdu_hate_speech/main/task_1_validation.tsv": {"num_bytes": 73747, "checksum": "eff8a097b0d8974bec2158b8e0512b43537cbf796c828ca64fd3841fc8dee0cb"}, "https://raw.githubusercontent.com/haroonshakeel/roman_urdu_hate_speech/main/task_1_test.tsv": {"num_bytes": 186093, "checksum": "c08a90dd63e35a0eb3737c90f7bc09917b2832e56ffab8b37fff89499a419fe2"}}, "download_size": 927937, "post_processing_size": null, "dataset_size": 1023565, "size_in_bytes": 1951502}, "Fine_Grained": {"description": " The Roman Urdu Hate-Speech and Offensive Language Detection (RUHSOLD) dataset is a Roman Urdu dataset of tweets annotated by experts in the relevant language. The authors develop the gold-standard for two sub-tasks. First sub-task is based on binary labels of Hate-Offensive content and Normal content (i.e., inoffensive language). These labels are self-explanatory. The authors refer to this sub-task as coarse-grained classification. Second sub-task defines Hate-Offensive content with four labels at a granular level. These labels are the most relevant for the demographic of users who converse in RU and are defined in related literature. The authors refer to this sub-task as fine-grained classification. The objective behind creating two gold-standards is to enable the researchers to evaluate the hate speech detection approaches on both easier (coarse-grained) and challenging (fine-grained) scenarios. 
", "citation": "@inproceedings{rizwan2020hate,\n title={Hate-speech and offensive language detection in roman Urdu},\n author={Rizwan, Hammad and Shakeel, Muhammad Haroon and Karim, Asim},\n booktitle={Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)},\n pages={2512--2522},\n year={2020}\n}\n", "homepage": "https://github.com/haroonshakeel/roman_urdu_hate_speech", "license": "MIT License", "features": {"tweet": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 5, "names": ["Abusive/Offensive", "Normal", "Religious Hate", "Sexism", "Profane/Untargeted"], "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "task_templates": [{"task": "text-classification", "text_column": "tweet", "label_column": "label"}], "builder_name": "roman_urdu_hate_speech", "config_name": "Fine_Grained", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 723670, "num_examples": 7208, "dataset_name": "roman_urdu_hate_speech"}, "test": {"name": "test", "num_bytes": 219359, "num_examples": 2002, "dataset_name": "roman_urdu_hate_speech"}, "validation": {"name": "validation", "num_bytes": 723670, "num_examples": 7208, "dataset_name": "roman_urdu_hate_speech"}}, "download_checksums": {"https://raw.githubusercontent.com/haroonshakeel/roman_urdu_hate_speech/main/task_2_train.tsv": {"num_bytes": 666024, "checksum": "936bbb67990f6e19e136ecde7f313b3acf266ce50824deebb06a6513dc9341be"}, "https://raw.githubusercontent.com/haroonshakeel/roman_urdu_hate_speech/main/task_2_validation.tsv": {"num_bytes": 666024, "checksum": "936bbb67990f6e19e136ecde7f313b3acf266ce50824deebb06a6513dc9341be"}, "https://raw.githubusercontent.com/haroonshakeel/roman_urdu_hate_speech/main/task_2_test.tsv": {"num_bytes": 187375, "checksum": "09e90a3a59dfaef64a4a4debd105254ecd1749312a1a6b275d7377c73ea5b8ca"}}, "download_size": 1519423, 
"post_processing_size": null, "dataset_size": 1666699, "size_in_bytes": 3186122}}