TID-8 / dataset_info.json
{
"commitmentbank-ann": {
"builder_name": "tid8",
"citation": "@inproceedings{de2019commitmentbank,\n title={The commitmentbank: Investigating projection in naturally occurring discourse},\n author={De Marneffe, Marie-Catherine and Simons, Mandy and Tonhauser, Judith},\n booktitle={proceedings of Sinn und Bedeutung},\n volume={23},\n number={2},\n pages={107--124},\n year={2019}\n}\n@inproceedings{deng2023tid8,\n title={You Are What You Annotate: Towards Better Models through Annotator Representations},\n author={Deng, Naihao and Liu, Siyang and Zhang, Frederick Xinliang and Wu, Winston and Wang, Lu and Mihalcea, Rada},\n booktitle={Findings of EMNLP 2023},\n year={2023}\n}\nNote that each TID-8 dataset has its own citation. Please see the source to\nget the correct citation for each contained dataset.\n",
"config_name": "commitmentbank-ann",
"dataset_name": "tid8",
"dataset_size": 10507109,
"description": "TID-8 is a new benchmark focused on the task of letting models learn from data that has inherent disagreement.\n\nCommitmentBank (De Marneffe et al.,\n2019) is an NLI dataset. It contains naturally oc-\ncurring discourses whose final sentence contains\na clause-embedding predicate under an entailment\ncanceling operator (question, modal, negation, an-\ntecedent of conditional).\nAnnotation Split:\nWe split the annotations for each annotator into train and test set.\n\nIn other words, the same set of annotators appear in both train, (val),\nand test sets.\n\nFor datasets that have splits originally, we follow the original split and remove\ndatapoints in test sets that are annotated by an annotator who is not in\nthe training set.\n\nFor datasets that do not have splits originally, we split the data into \ntrain and test set for convenience, you may further split the train set\ninto a train and val set.\n",
"download_checksums": {
"https://raw.githubusercontent.com/MichiganNLP/tid8-dataset/main/huggingface-data/commitmentbank-ann.zip": {
"num_bytes": 1941977,
"checksum": null
}
},
"download_size": 1941977,
"features": {
"HitID": {
"dtype": "string",
"_type": "Value"
},
"Verb": {
"dtype": "string",
"_type": "Value"
},
"Context": {
"dtype": "string",
"_type": "Value"
},
"Prompt": {
"dtype": "string",
"_type": "Value"
},
"Target": {
"dtype": "string",
"_type": "Value"
},
"ModalType": {
"dtype": "string",
"_type": "Value"
},
"Embedding": {
"dtype": "string",
"_type": "Value"
},
"MatTense": {
"dtype": "string",
"_type": "Value"
},
"weak_labels": {
"feature": {
"dtype": "string",
"_type": "Value"
},
"_type": "Sequence"
},
"question": {
"dtype": "string",
"_type": "Value"
},
"uid": {
"dtype": "string",
"_type": "Value"
},
"id": {
"dtype": "int32",
"_type": "Value"
},
"annotator_id": {
"dtype": "string",
"_type": "Value"
},
"answer": {
"dtype": "string",
"_type": "Value"
},
"answer_label": {
"names": [
"0",
"1",
"2",
"3",
"-3",
"-1",
"-2"
],
"_type": "ClassLabel"
}
},
"homepage": "https://github.com/mcdm/CommitmentBank",
"license": "",
"size_in_bytes": 12449086,
"splits": {
"train": {
"name": "train",
"num_bytes": 7153364,
"num_examples": 7816,
"dataset_name": "tid8"
},
"test": {
"name": "test",
"num_bytes": 3353745,
"num_examples": 3729,
"dataset_name": "tid8"
}
},
"version": {
"version_str": "1.0.3",
"major": 1,
"minor": 0,
"patch": 3
}
},
"commitmentbank-atr": {
"builder_name": "tid8",
"citation": "@inproceedings{de2019commitmentbank,\n title={The commitmentbank: Investigating projection in naturally occurring discourse},\n author={De Marneffe, Marie-Catherine and Simons, Mandy and Tonhauser, Judith},\n booktitle={proceedings of Sinn und Bedeutung},\n volume={23},\n number={2},\n pages={107--124},\n year={2019}\n}\n@inproceedings{deng2023tid8,\n title={You Are What You Annotate: Towards Better Models through Annotator Representations},\n author={Deng, Naihao and Liu, Siyang and Zhang, Frederick Xinliang and Wu, Winston and Wang, Lu and Mihalcea, Rada},\n booktitle={Findings of EMNLP 2023},\n year={2023}\n}\nNote that each TID-8 dataset has its own citation. Please see the source to\nget the correct citation for each contained dataset.\n",
"config_name": "commitmentbank-atr",
"dataset_name": "tid8",
"dataset_size": 10507109,
"description": "TID-8 is a new benchmark focused on the task of letting models learn from data that has inherent disagreement.\n\nCommitmentBank (De Marneffe et al.,\n2019) is an NLI dataset. It contains naturally oc-\ncurring discourses whose final sentence contains\na clause-embedding predicate under an entailment\ncanceling operator (question, modal, negation, an-\ntecedent of conditional).\nAnnotator Split:\nWe split annotators into train and test set.\n\nIn other words, a different set of annotators would appear in train and test sets.\n\nWe split the data into train and test set for convenience, you may consider\nfurther splitting the train set into a train and val set for performance validation.\n",
"download_checksums": {
"https://raw.githubusercontent.com/MichiganNLP/tid8-dataset/main/huggingface-data/commitmentbank-atr.zip": {
"num_bytes": 1942215,
"checksum": null
}
},
"download_size": 1942215,
"features": {
"HitID": {
"dtype": "string",
"_type": "Value"
},
"Verb": {
"dtype": "string",
"_type": "Value"
},
"Context": {
"dtype": "string",
"_type": "Value"
},
"Prompt": {
"dtype": "string",
"_type": "Value"
},
"Target": {
"dtype": "string",
"_type": "Value"
},
"ModalType": {
"dtype": "string",
"_type": "Value"
},
"Embedding": {
"dtype": "string",
"_type": "Value"
},
"MatTense": {
"dtype": "string",
"_type": "Value"
},
"weak_labels": {
"feature": {
"dtype": "string",
"_type": "Value"
},
"_type": "Sequence"
},
"question": {
"dtype": "string",
"_type": "Value"
},
"uid": {
"dtype": "string",
"_type": "Value"
},
"id": {
"dtype": "int32",
"_type": "Value"
},
"annotator_id": {
"dtype": "string",
"_type": "Value"
},
"answer": {
"dtype": "string",
"_type": "Value"
},
"answer_label": {
"names": [
"0",
"1",
"2",
"3",
"-3",
"-1",
"-2"
],
"_type": "ClassLabel"
}
},
"homepage": "https://github.com/mcdm/CommitmentBank",
"license": "",
"size_in_bytes": 12449324,
"splits": {
"train": {
"name": "train",
"num_bytes": 6636145,
"num_examples": 7274,
"dataset_name": "tid8"
},
"test": {
"name": "test",
"num_bytes": 3870964,
"num_examples": 4271,
"dataset_name": "tid8"
}
},
"version": {
"version_str": "1.0.3",
"major": 1,
"minor": 0,
"patch": 3
}
},
"friends_qia-ann": {
"builder_name": "tid8",
"citation": "@inproceedings{damgaard-etal-2021-ill,\n title = \"{``}{I}{'}ll be there for you{''}: The One with Understanding Indirect Answers\",\n author = \"Damgaard, Cathrine and\n Toborek, Paulina and\n Eriksen, Trine and\n Plank, Barbara\",\n booktitle = \"Proceedings of the 2nd Workshop on Computational Approaches to Discourse\",\n month = nov,\n year = \"2021\",\n address = \"Punta Cana, Dominican Republic and Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2021.codi-main.1\",\n doi = \"10.18653/v1/2021.codi-main.1\",\n pages = \"1--11\",\n}\n@inproceedings{deng2023tid8,\n title={You Are What You Annotate: Towards Better Models through Annotator Representations},\n author={Deng, Naihao and Liu, Siyang and Zhang, Frederick Xinliang and Wu, Winston and Wang, Lu and Mihalcea, Rada},\n booktitle={Findings of EMNLP 2023},\n year={2023}\n}\nNote that each TID-8 dataset has its own citation. Please see the source to\nget the correct citation for each contained dataset.\n",
"config_name": "friends_qia-ann",
"dataset_name": "tid8",
"dataset_size": 6250338,
"description": "TID-8 is a new benchmark focused on the task of letting models learn from data that has inherent disagreement.\n\nFriends QIA (Damgaard et al., 2021) is a\ncorpus of classifying indirect answers to polar questions.\nAnnotation Split:\nWe split the annotations for each annotator into train and test set.\n\nIn other words, the same set of annotators appear in both train, (val),\nand test sets.\n\nFor datasets that have splits originally, we follow the original split and remove\ndatapoints in test sets that are annotated by an annotator who is not in\nthe training set.\n\nFor datasets that do not have splits originally, we split the data into \ntrain and test set for convenience, you may further split the train set\ninto a train and val set.\n",
"download_checksums": {
"https://raw.githubusercontent.com/MichiganNLP/tid8-dataset/main/huggingface-data/friends_qia-ann.zip": {
"num_bytes": 818058,
"checksum": null
}
},
"download_size": 818058,
"features": {
"Season": {
"dtype": "string",
"_type": "Value"
},
"Episode": {
"dtype": "string",
"_type": "Value"
},
"Category": {
"dtype": "string",
"_type": "Value"
},
"Q_person": {
"dtype": "string",
"_type": "Value"
},
"A_person": {
"dtype": "string",
"_type": "Value"
},
"Q_original": {
"dtype": "string",
"_type": "Value"
},
"Q_modified": {
"dtype": "string",
"_type": "Value"
},
"A_modified": {
"dtype": "string",
"_type": "Value"
},
"Annotation_1": {
"dtype": "string",
"_type": "Value"
},
"Annotation_2": {
"dtype": "string",
"_type": "Value"
},
"Annotation_3": {
"dtype": "string",
"_type": "Value"
},
"Goldstandard": {
"dtype": "string",
"_type": "Value"
},
"question": {
"dtype": "string",
"_type": "Value"
},
"uid": {
"dtype": "string",
"_type": "Value"
},
"id": {
"dtype": "int32",
"_type": "Value"
},
"annotator_id": {
"dtype": "string",
"_type": "Value"
},
"answer": {
"dtype": "string",
"_type": "Value"
},
"answer_label": {
"names": [
"1",
"2",
"3",
"4",
"5"
],
"_type": "ClassLabel"
}
},
"homepage": "https://github.com/friendsQIA/Friends_QIA",
"license": "",
"size_in_bytes": 7068396,
"splits": {
"validation": {
"name": "validation",
"num_bytes": 687135,
"num_examples": 1872,
"dataset_name": "tid8"
},
"train": {
"name": "train",
"num_bytes": 4870170,
"num_examples": 13113,
"dataset_name": "tid8"
},
"test": {
"name": "test",
"num_bytes": 693033,
"num_examples": 1872,
"dataset_name": "tid8"
}
},
"version": {
"version_str": "1.0.3",
"major": 1,
"minor": 0,
"patch": 3
}
},
"friends_qia-atr": {
"builder_name": "tid8",
"citation": "@inproceedings{damgaard-etal-2021-ill,\n title = \"{``}{I}{'}ll be there for you{''}: The One with Understanding Indirect Answers\",\n author = \"Damgaard, Cathrine and\n Toborek, Paulina and\n Eriksen, Trine and\n Plank, Barbara\",\n booktitle = \"Proceedings of the 2nd Workshop on Computational Approaches to Discourse\",\n month = nov,\n year = \"2021\",\n address = \"Punta Cana, Dominican Republic and Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2021.codi-main.1\",\n doi = \"10.18653/v1/2021.codi-main.1\",\n pages = \"1--11\",\n}\n@inproceedings{deng2023tid8,\n title={You Are What You Annotate: Towards Better Models through Annotator Representations},\n author={Deng, Naihao and Liu, Siyang and Zhang, Frederick Xinliang and Wu, Winston and Wang, Lu and Mihalcea, Rada},\n booktitle={Findings of EMNLP 2023},\n year={2023}\n}\nNote that each TID-8 dataset has its own citation. Please see the source to\nget the correct citation for each contained dataset.\n",
"config_name": "friends_qia-atr",
"dataset_name": "tid8",
"dataset_size": 6250338,
"description": "TID-8 is a new benchmark focused on the task of letting models learn from data that has inherent disagreement.\n\nFriends QIA (Damgaard et al., 2021) is a\ncorpus of classifying indirect answers to polar questions.\nAnnotator Split:\nWe split annotators into train and test set.\n\nIn other words, a different set of annotators would appear in train and test sets.\n\nWe split the data into train and test set for convenience, you may consider\nfurther splitting the train set into a train and val set for performance validation.\n",
"download_checksums": {
"https://raw.githubusercontent.com/MichiganNLP/tid8-dataset/main/huggingface-data/friends_qia-atr.zip": {
"num_bytes": 1826735,
"checksum": null
}
},
"download_size": 1826735,
"features": {
"Season": {
"dtype": "string",
"_type": "Value"
},
"Episode": {
"dtype": "string",
"_type": "Value"
},
"Category": {
"dtype": "string",
"_type": "Value"
},
"Q_person": {
"dtype": "string",
"_type": "Value"
},
"A_person": {
"dtype": "string",
"_type": "Value"
},
"Q_original": {
"dtype": "string",
"_type": "Value"
},
"Q_modified": {
"dtype": "string",
"_type": "Value"
},
"A_modified": {
"dtype": "string",
"_type": "Value"
},
"Annotation_1": {
"dtype": "string",
"_type": "Value"
},
"Annotation_2": {
"dtype": "string",
"_type": "Value"
},
"Annotation_3": {
"dtype": "string",
"_type": "Value"
},
"Goldstandard": {
"dtype": "string",
"_type": "Value"
},
"question": {
"dtype": "string",
"_type": "Value"
},
"uid": {
"dtype": "string",
"_type": "Value"
},
"id": {
"dtype": "int32",
"_type": "Value"
},
"annotator_id": {
"dtype": "string",
"_type": "Value"
},
"answer": {
"dtype": "string",
"_type": "Value"
},
"answer_label": {
"names": [
"1",
"2",
"3",
"4",
"5"
],
"_type": "ClassLabel"
}
},
"homepage": "https://github.com/friendsQIA/Friends_QIA",
"license": "",
"size_in_bytes": 8077073,
"splits": {
"train": {
"name": "train",
"num_bytes": 4166892,
"num_examples": 11238,
"dataset_name": "tid8"
},
"test": {
"name": "test",
"num_bytes": 2083446,
"num_examples": 5619,
"dataset_name": "tid8"
}
},
"version": {
"version_str": "1.0.3",
"major": 1,
"minor": 0,
"patch": 3
}
},
"goemotions-ann": {
"builder_name": "tid8",
"citation": "@inproceedings{demszky-etal-2020-goemotions,\n title = \"{G}o{E}motions: A Dataset of Fine-Grained Emotions\",\n author = \"Demszky, Dorottya and\n Movshovitz-Attias, Dana and\n Ko, Jeongwoo and\n Cowen, Alan and\n Nemade, Gaurav and\n Ravi, Sujith\",\n booktitle = \"Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics\",\n month = jul,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2020.acl-main.372\",\n doi = \"10.18653/v1/2020.acl-main.372\",\n pages = \"4040--4054\"\n}\n@inproceedings{deng2023tid8,\n title={You Are What You Annotate: Towards Better Models through Annotator Representations},\n author={Deng, Naihao and Liu, Siyang and Zhang, Frederick Xinliang and Wu, Winston and Wang, Lu and Mihalcea, Rada},\n booktitle={Findings of EMNLP 2023},\n year={2023}\n}\nNote that each TID-8 dataset has its own citation. Please see the source to\nget the correct citation for each contained dataset.\n",
"config_name": "goemotions-ann",
"dataset_name": "tid8",
"dataset_size": 66108105,
"description": "TID-8 is a new benchmark focused on the task of letting models learn from data that has inherent disagreement.\n\nGo Emotions (Demszky et al., 2020) is a\nfine-grained emotion classification corpus of care-\nfully curated comments extracted from Reddit. We\ngroup emotions into four categories following sen-\ntiment level divides in the original paper.\nAnnotation Split:\nWe split the annotations for each annotator into train and test set.\n\nIn other words, the same set of annotators appear in both train, (val),\nand test sets.\n\nFor datasets that have splits originally, we follow the original split and remove\ndatapoints in test sets that are annotated by an annotator who is not in\nthe training set.\n\nFor datasets that do not have splits originally, we split the data into \ntrain and test set for convenience, you may further split the train set\ninto a train and val set.\n",
"download_checksums": {
"https://raw.githubusercontent.com/MichiganNLP/tid8-dataset/main/huggingface-data/goemotions-ann.zip": {
"num_bytes": 19388288,
"checksum": null
}
},
"download_size": 19388288,
"features": {
"author": {
"dtype": "string",
"_type": "Value"
},
"subreddit": {
"dtype": "string",
"_type": "Value"
},
"link_id": {
"dtype": "string",
"_type": "Value"
},
"parent_id": {
"dtype": "string",
"_type": "Value"
},
"created_utc": {
"dtype": "string",
"_type": "Value"
},
"rater_id": {
"dtype": "string",
"_type": "Value"
},
"example_very_unclear": {
"dtype": "string",
"_type": "Value"
},
"admiration": {
"dtype": "string",
"_type": "Value"
},
"amusement": {
"dtype": "string",
"_type": "Value"
},
"anger": {
"dtype": "string",
"_type": "Value"
},
"annoyance": {
"dtype": "string",
"_type": "Value"
},
"approval": {
"dtype": "string",
"_type": "Value"
},
"caring": {
"dtype": "string",
"_type": "Value"
},
"confusion": {
"dtype": "string",
"_type": "Value"
},
"curiosity": {
"dtype": "string",
"_type": "Value"
},
"desire": {
"dtype": "string",
"_type": "Value"
},
"disappointment": {
"dtype": "string",
"_type": "Value"
},
"disapproval": {
"dtype": "string",
"_type": "Value"
},
"disgust": {
"dtype": "string",
"_type": "Value"
},
"embarrassment": {
"dtype": "string",
"_type": "Value"
},
"excitement": {
"dtype": "string",
"_type": "Value"
},
"fear": {
"dtype": "string",
"_type": "Value"
},
"gratitude": {
"dtype": "string",
"_type": "Value"
},
"grief": {
"dtype": "string",
"_type": "Value"
},
"joy": {
"dtype": "string",
"_type": "Value"
},
"love": {
"dtype": "string",
"_type": "Value"
},
"nervousness": {
"dtype": "string",
"_type": "Value"
},
"optimism": {
"dtype": "string",
"_type": "Value"
},
"pride": {
"dtype": "string",
"_type": "Value"
},
"realization": {
"dtype": "string",
"_type": "Value"
},
"relief": {
"dtype": "string",
"_type": "Value"
},
"remorse": {
"dtype": "string",
"_type": "Value"
},
"sadness": {
"dtype": "string",
"_type": "Value"
},
"surprise": {
"dtype": "string",
"_type": "Value"
},
"neutral": {
"dtype": "string",
"_type": "Value"
},
"question": {
"dtype": "string",
"_type": "Value"
},
"uid": {
"dtype": "string",
"_type": "Value"
},
"id": {
"dtype": "int32",
"_type": "Value"
},
"annotator_id": {
"dtype": "string",
"_type": "Value"
},
"answer": {
"dtype": "string",
"_type": "Value"
},
"answer_label": {
"names": [
"positive",
"ambiguous",
"negative",
"neutral"
],
"_type": "ClassLabel"
}
},
"homepage": "https://github.com/google-research/google-research/tree/master/goemotions",
"license": "",
"size_in_bytes": 85496393,
"splits": {
"train": {
"name": "train",
"num_bytes": 46277072,
"num_examples": 135504,
"dataset_name": "tid8"
},
"test": {
"name": "test",
"num_bytes": 19831033,
"num_examples": 58129,
"dataset_name": "tid8"
}
},
"version": {
"version_str": "1.0.3",
"major": 1,
"minor": 0,
"patch": 3
}
},
"goemotions-atr": {
"builder_name": "tid8",
"citation": "@inproceedings{demszky-etal-2020-goemotions,\n title = \"{G}o{E}motions: A Dataset of Fine-Grained Emotions\",\n author = \"Demszky, Dorottya and\n Movshovitz-Attias, Dana and\n Ko, Jeongwoo and\n Cowen, Alan and\n Nemade, Gaurav and\n Ravi, Sujith\",\n booktitle = \"Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics\",\n month = jul,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2020.acl-main.372\",\n doi = \"10.18653/v1/2020.acl-main.372\",\n pages = \"4040--4054\"\n}\n@inproceedings{deng2023tid8,\n title={You Are What You Annotate: Towards Better Models through Annotator Representations},\n author={Deng, Naihao and Liu, Siyang and Zhang, Frederick Xinliang and Wu, Winston and Wang, Lu and Mihalcea, Rada},\n booktitle={Findings of EMNLP 2023},\n year={2023}\n}\nNote that each TID-8 dataset has its own citation. Please see the source to\nget the correct citation for each contained dataset.\n",
"config_name": "goemotions-atr",
"dataset_name": "tid8",
"dataset_size": 66108105,
"description": "TID-8 is a new benchmark focused on the task of letting models learn from data that has inherent disagreement.\n\nGo Emotions (Demszky et al., 2020) is a\nfine-grained emotion classification corpus of care-\nfully curated comments extracted from Reddit. We\ngroup emotions into four categories following sen-\ntiment level divides in the original paper.\nAnnotator Split:\nWe split annotators into train and test set.\n\nIn other words, a different set of annotators would appear in train and test sets.\n\nWe split the data into train and test set for convenience, you may consider\nfurther splitting the train set into a train and val set for performance validation.\n",
"download_checksums": {
"https://raw.githubusercontent.com/MichiganNLP/tid8-dataset/main/huggingface-data/goemotions-atr.zip": {
"num_bytes": 19146912,
"checksum": null
}
},
"download_size": 19146912,
"features": {
"author": {
"dtype": "string",
"_type": "Value"
},
"subreddit": {
"dtype": "string",
"_type": "Value"
},
"link_id": {
"dtype": "string",
"_type": "Value"
},
"parent_id": {
"dtype": "string",
"_type": "Value"
},
"created_utc": {
"dtype": "string",
"_type": "Value"
},
"rater_id": {
"dtype": "string",
"_type": "Value"
},
"example_very_unclear": {
"dtype": "string",
"_type": "Value"
},
"admiration": {
"dtype": "string",
"_type": "Value"
},
"amusement": {
"dtype": "string",
"_type": "Value"
},
"anger": {
"dtype": "string",
"_type": "Value"
},
"annoyance": {
"dtype": "string",
"_type": "Value"
},
"approval": {
"dtype": "string",
"_type": "Value"
},
"caring": {
"dtype": "string",
"_type": "Value"
},
"confusion": {
"dtype": "string",
"_type": "Value"
},
"curiosity": {
"dtype": "string",
"_type": "Value"
},
"desire": {
"dtype": "string",
"_type": "Value"
},
"disappointment": {
"dtype": "string",
"_type": "Value"
},
"disapproval": {
"dtype": "string",
"_type": "Value"
},
"disgust": {
"dtype": "string",
"_type": "Value"
},
"embarrassment": {
"dtype": "string",
"_type": "Value"
},
"excitement": {
"dtype": "string",
"_type": "Value"
},
"fear": {
"dtype": "string",
"_type": "Value"
},
"gratitude": {
"dtype": "string",
"_type": "Value"
},
"grief": {
"dtype": "string",
"_type": "Value"
},
"joy": {
"dtype": "string",
"_type": "Value"
},
"love": {
"dtype": "string",
"_type": "Value"
},
"nervousness": {
"dtype": "string",
"_type": "Value"
},
"optimism": {
"dtype": "string",
"_type": "Value"
},
"pride": {
"dtype": "string",
"_type": "Value"
},
"realization": {
"dtype": "string",
"_type": "Value"
},
"relief": {
"dtype": "string",
"_type": "Value"
},
"remorse": {
"dtype": "string",
"_type": "Value"
},
"sadness": {
"dtype": "string",
"_type": "Value"
},
"surprise": {
"dtype": "string",
"_type": "Value"
},
"neutral": {
"dtype": "string",
"_type": "Value"
},
"question": {
"dtype": "string",
"_type": "Value"
},
"uid": {
"dtype": "string",
"_type": "Value"
},
"id": {
"dtype": "int32",
"_type": "Value"
},
"annotator_id": {
"dtype": "string",
"_type": "Value"
},
"answer": {
"dtype": "string",
"_type": "Value"
},
"answer_label": {
"names": [
"positive",
"ambiguous",
"negative",
"neutral"
],
"_type": "ClassLabel"
}
},
"homepage": "https://github.com/google-research/google-research/tree/master/goemotions",
"license": "",
"size_in_bytes": 85255017,
"splits": {
"train": {
"name": "train",
"num_bytes": 44856233,
"num_examples": 131395,
"dataset_name": "tid8"
},
"test": {
"name": "test",
"num_bytes": 21251872,
"num_examples": 62238,
"dataset_name": "tid8"
}
},
"version": {
"version_str": "1.0.3",
"major": 1,
"minor": 0,
"patch": 3
}
},
"hs_brexit-ann": {
"builder_name": "tid8",
"citation": "@article{akhtar2021whose,\n title={Whose opinions matter? perspective-aware models to identify opinions of hate speech victims in abusive language detection},\n author={Akhtar, Sohail and Basile, Valerio and Patti, Viviana},\n journal={arXiv preprint arXiv:2106.15896},\n year={2021}\n}\n@inproceedings{deng2023tid8,\n title={You Are What You Annotate: Towards Better Models through Annotator Representations},\n author={Deng, Naihao and Liu, Siyang and Zhang, Frederick Xinliang and Wu, Winston and Wang, Lu and Mihalcea, Rada},\n booktitle={Findings of EMNLP 2023},\n year={2023}\n}\nNote that each TID-8 dataset has its own citation. Please see the source to\nget the correct citation for each contained dataset.\n",
"config_name": "hs_brexit-ann",
"dataset_name": "tid8",
"dataset_size": 1261034,
"description": "TID-8 is a new benchmark focused on the task of letting models learn from data that has inherent disagreement.\n\nHS-Brexit (Akhtar et al., 2021) is an abu-\nsive language detection corpus on Brexit belonging\nto two distinct groups: a target group of three Mus-\nlim immigrants in the UK, and a control group of\nthree other individuals.\nAnnotation Split:\nWe split the annotations for each annotator into train and test set.\n\nIn other words, the same set of annotators appear in both train, (val),\nand test sets.\n\nFor datasets that have splits originally, we follow the original split and remove\ndatapoints in test sets that are annotated by an annotator who is not in\nthe training set.\n\nFor datasets that do not have splits originally, we split the data into \ntrain and test set for convenience, you may further split the train set\ninto a train and val set.\n",
"download_checksums": {
"https://raw.githubusercontent.com/MichiganNLP/tid8-dataset/main/huggingface-data/hs_brexit-ann.zip": {
"num_bytes": 127608,
"checksum": null
}
},
"download_size": 127608,
"features": {
"other annotations": {
"dtype": "string",
"_type": "Value"
},
"question": {
"dtype": "string",
"_type": "Value"
},
"uid": {
"dtype": "string",
"_type": "Value"
},
"id": {
"dtype": "int32",
"_type": "Value"
},
"annotator_id": {
"dtype": "string",
"_type": "Value"
},
"answer": {
"dtype": "string",
"_type": "Value"
},
"answer_label": {
"names": [
"hate_speech",
"not_hate_speech"
],
"_type": "ClassLabel"
}
},
"homepage": "https://le-wi-di.github.io/",
"license": "",
"size_in_bytes": 1388642,
"splits": {
"train": {
"name": "train",
"num_bytes": 1039008,
"num_examples": 4704,
"dataset_name": "tid8"
},
"test": {
"name": "test",
"num_bytes": 222026,
"num_examples": 1008,
"dataset_name": "tid8"
}
},
"version": {
"version_str": "1.0.3",
"major": 1,
"minor": 0,
"patch": 3
}
},
"hs_brexit-atr": {
"builder_name": "tid8",
"citation": "@article{akhtar2021whose,\n title={Whose opinions matter? perspective-aware models to identify opinions of hate speech victims in abusive language detection},\n author={Akhtar, Sohail and Basile, Valerio and Patti, Viviana},\n journal={arXiv preprint arXiv:2106.15896},\n year={2021}\n}\n@inproceedings{deng2023tid8,\n title={You Are What You Annotate: Towards Better Models through Annotator Representations},\n author={Deng, Naihao and Liu, Siyang and Zhang, Frederick Xinliang and Wu, Winston and Wang, Lu and Mihalcea, Rada},\n booktitle={Findings of EMNLP 2023},\n year={2023}\n}\nNote that each TID-8 dataset has its own citation. Please see the source to\nget the correct citation for each contained dataset.\n",
"config_name": "hs_brexit-atr",
"dataset_name": "tid8",
"dataset_size": 1481870,
"description": "TID-8 is a new benchmark focused on the task of letting models learn from data that has inherent disagreement.\n\nHS-Brexit (Akhtar et al., 2021) is an abu-\nsive language detection corpus on Brexit belonging\nto two distinct groups: a target group of three Mus-\nlim immigrants in the UK, and a control group of\nthree other individuals.\nAnnotator Split:\nWe split annotators into train and test set.\n\nIn other words, a different set of annotators would appear in train and test sets.\n\nWe split the data into train and test set for convenience, you may consider\nfurther splitting the train set into a train and val set for performance validation.\n",
"download_checksums": {
"https://raw.githubusercontent.com/MichiganNLP/tid8-dataset/main/huggingface-data/hs_brexit-atr.zip": {
"num_bytes": 408475,
"checksum": null
}
},
"download_size": 408475,
"features": {
"other annotations": {
"dtype": "string",
"_type": "Value"
},
"question": {
"dtype": "string",
"_type": "Value"
},
"uid": {
"dtype": "string",
"_type": "Value"
},
"id": {
"dtype": "int32",
"_type": "Value"
},
"annotator_id": {
"dtype": "string",
"_type": "Value"
},
"answer": {
"dtype": "string",
"_type": "Value"
},
"answer_label": {
"names": [
"hate_speech",
"not_hate_speech"
],
"_type": "ClassLabel"
}
},
"homepage": "https://le-wi-di.github.io/",
"license": "",
"size_in_bytes": 1890345,
"splits": {
"train": {
"name": "train",
"num_bytes": 986132,
"num_examples": 4480,
"dataset_name": "tid8"
},
"test": {
"name": "test",
"num_bytes": 495738,
"num_examples": 2240,
"dataset_name": "tid8"
}
},
"version": {
"version_str": "1.0.3",
"major": 1,
"minor": 0,
"patch": 3
}
},
"humor-ann": {
"builder_name": "tid8",
"citation": "@inproceedings{simpson-etal-2019-predicting,\n title = \"Predicting Humorousness and Metaphor Novelty with {G}aussian Process Preference Learning\",\n author = \"Simpson, Edwin and\n Do Dinh, Erik-L{\\^a}n and\n Miller, Tristan and\n Gurevych, Iryna\",\n booktitle = \"Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics\",\n month = jul,\n year = \"2019\",\n address = \"Florence, Italy\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/P19-1572\",\n doi = \"10.18653/v1/P19-1572\",\n pages = \"5716--5728\"\n}\n@inproceedings{deng2023tid8,\n title={You Are What You Annotate: Towards Better Models through Annotator Representations},\n author={Deng, Naihao and Liu, Siyang and Zhang, Frederick Xinliang and Wu, Winston and Wang, Lu and Mihalcea, Rada},\n booktitle={Findings of EMNLP 2023},\n year={2023}\n}\nNote that each TID-8 dataset has its own citation. Please see the source to\nget the correct citation for each contained dataset.\n",
"config_name": "humor-ann",
"dataset_name": "tid8",
"dataset_size": 40745460,
"description": "TID-8 is a new benchmark focused on the task of letting models learn from data that has inherent disagreement.\n\nHumor (Simpson et al., 2019) is a corpus\nof online texts for pairwise humorousness compari-\nson\nAnnotation Split:\nWe split the annotations for each annotator into train and test set.\n\nIn other words, the same set of annotators appear in both train, (val),\nand test sets.\n\nFor datasets that have splits originally, we follow the original split and remove\ndatapoints in test sets that are annotated by an annotator who is not in\nthe training set.\n\nFor datasets that do not have splits originally, we split the data into \ntrain and test set for convenience, you may further split the train set\ninto a train and val set.\n",
"download_checksums": {
"https://raw.githubusercontent.com/MichiganNLP/tid8-dataset/main/huggingface-data/humor-ann.zip": {
"num_bytes": 10682583,
"checksum": null
}
},
"download_size": 10682583,
"features": {
"text_a": {
"dtype": "string",
"_type": "Value"
},
"text_b": {
"dtype": "string",
"_type": "Value"
},
"question": {
"dtype": "string",
"_type": "Value"
},
"uid": {
"dtype": "string",
"_type": "Value"
},
"id": {
"dtype": "int32",
"_type": "Value"
},
"annotator_id": {
"dtype": "string",
"_type": "Value"
},
"answer": {
"dtype": "string",
"_type": "Value"
},
"answer_label": {
"names": [
"B",
"X",
"A"
],
"_type": "ClassLabel"
}
},
"homepage": "https://github.com/ukplab/acl2019-GPPL-humour-metaphor",
"license": "",
"size_in_bytes": 51428043,
"splits": {
"train": {
"name": "train",
"num_bytes": 28524839,
"num_examples": 98735,
"dataset_name": "tid8"
},
"test": {
"name": "test",
"num_bytes": 12220621,
"num_examples": 42315,
"dataset_name": "tid8"
}
},
"version": {
"version_str": "1.0.3",
"major": 1,
"minor": 0,
"patch": 3
}
},
"humor-atr": {
"builder_name": "tid8",
"citation": "@inproceedings{simpson-etal-2019-predicting,\n title = \"Predicting Humorousness and Metaphor Novelty with {G}aussian Process Preference Learning\",\n author = \"Simpson, Edwin and\n Do Dinh, Erik-L{\\^a}n and\n Miller, Tristan and\n Gurevych, Iryna\",\n booktitle = \"Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics\",\n month = jul,\n year = \"2019\",\n address = \"Florence, Italy\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/P19-1572\",\n doi = \"10.18653/v1/P19-1572\",\n pages = \"5716--5728\"\n}\n@inproceedings{deng2023tid8,\n title={You Are What You Annotate: Towards Better Models through Annotator Representations},\n author={Deng, Naihao and Liu, Siyang and Zhang, Frederick Xinliang and Wu, Winston and Wang, Lu and Mihalcea, Rada},\n booktitle={Findings of EMNLP 2023},\n year={2023}\n}\nNote that each TID-8 dataset has its own citation. Please see the source to\nget the correct citation for each contained dataset.\n",
"config_name": "humor-atr",
"dataset_name": "tid8",
"dataset_size": 40745460,
"description": "TID-8 is a new benchmark focused on the task of letting models learn from data that has inherent disagreement.\n\nHumor (Simpson et al., 2019) is a corpus\nof online texts for pairwise humorousness compari-\nson\nAnnotator Split:\nWe split annotators into train and test set.\n\nIn other words, a different set of annotators would appear in train and test sets.\n\nWe split the data into train and test set for convenience, you may consider\nfurther splitting the train set into a train and val set for performance validation.\n",
"download_checksums": {
"https://raw.githubusercontent.com/MichiganNLP/tid8-dataset/main/huggingface-data/humor-atr.zip": {
"num_bytes": 10461981,
"checksum": null
}
},
"download_size": 10461981,
"features": {
"text_a": {
"dtype": "string",
"_type": "Value"
},
"text_b": {
"dtype": "string",
"_type": "Value"
},
"question": {
"dtype": "string",
"_type": "Value"
},
"uid": {
"dtype": "string",
"_type": "Value"
},
"id": {
"dtype": "int32",
"_type": "Value"
},
"annotator_id": {
"dtype": "string",
"_type": "Value"
},
"answer": {
"dtype": "string",
"_type": "Value"
},
"answer_label": {
"names": [
"B",
"X",
"A"
],
"_type": "ClassLabel"
}
},
"homepage": "https://github.com/ukplab/acl2019-GPPL-humour-metaphor",
"license": "",
"size_in_bytes": 51207441,
"splits": {
"train": {
"name": "train",
"num_bytes": 28161248,
"num_examples": 97410,
"dataset_name": "tid8"
},
"test": {
"name": "test",
"num_bytes": 12584212,
"num_examples": 43640,
"dataset_name": "tid8"
}
},
"version": {
"version_str": "1.0.3",
"major": 1,
"minor": 0,
"patch": 3
}
},
"md-agreement-ann": {
"builder_name": "tid8",
"citation": "@inproceedings{leonardelli-etal-2021-agreeing,\n title = \"Agreeing to Disagree: Annotating Offensive Language Datasets with Annotators{'} Disagreement\",\n author = \"Leonardelli, Elisa and. Menini, Stefano and\n Palmero Aprosio, Alessio and\n Guerini, Marco and\n Tonelli, Sara\",\n booktitle = \"Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing\",\n month = nov,\n year = \"2021\",\n address = \"Online and Punta Cana, Dominican Republic\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2021.emnlp-main.822\",\n pages = \"10528--10539\",\n}\n@inproceedings{deng2023tid8,\n title={You Are What You Annotate: Towards Better Models through Annotator Representations},\n author={Deng, Naihao and Liu, Siyang and Zhang, Frederick Xinliang and Wu, Winston and Wang, Lu and Mihalcea, Rada},\n booktitle={Findings of EMNLP 2023},\n year={2023}\n}\nNote that each TID-8 dataset has its own citation. Please see the source to\nget the correct citation for each contained dataset.\n",
"config_name": "md-agreement-ann",
"dataset_name": "tid8",
"dataset_size": 10293433,
"description": "TID-8 is a new benchmark focused on the task of letting models learn from data that has inherent disagreement.\n\nMultiDomain Agreement (Leonardelli\net al., 2021) is a hate speech classification dataset of\nEnglish tweets from three domains of Black Lives\nMatter, Election, and Covid-19, with a particular\nfocus on tweets that potentially leads to disagree-\nment.\nAnnotation Split:\nWe split the annotations for each annotator into train and test set.\n\nIn other words, the same set of annotators appear in both train, (val),\nand test sets.\n\nFor datasets that have splits originally, we follow the original split and remove\ndatapoints in test sets that are annotated by an annotator who is not in\nthe training set.\n\nFor datasets that do not have splits originally, we split the data into \ntrain and test set for convenience, you may further split the train set\ninto a train and val set.\n",
"download_checksums": {
"https://raw.githubusercontent.com/MichiganNLP/tid8-dataset/main/huggingface-data/md-agreement-ann.zip": {
"num_bytes": 1414114,
"checksum": null
}
},
"download_size": 1414114,
"features": {
"task": {
"dtype": "string",
"_type": "Value"
},
"original_id": {
"dtype": "string",
"_type": "Value"
},
"domain": {
"dtype": "string",
"_type": "Value"
},
"question": {
"dtype": "string",
"_type": "Value"
},
"uid": {
"dtype": "string",
"_type": "Value"
},
"id": {
"dtype": "int32",
"_type": "Value"
},
"annotator_id": {
"dtype": "string",
"_type": "Value"
},
"answer": {
"dtype": "string",
"_type": "Value"
},
"answer_label": {
"names": [
"offensive_speech",
"not_offensive_speech"
],
"_type": "ClassLabel"
}
},
"homepage": "https://le-wi-di.github.io/",
"license": "",
"size_in_bytes": 11707547,
"splits": {
"train": {
"name": "train",
"num_bytes": 7794988,
"num_examples": 32960,
"dataset_name": "tid8"
},
"test": {
"name": "test",
"num_bytes": 2498445,
"num_examples": 10553,
"dataset_name": "tid8"
}
},
"version": {
"version_str": "1.0.3",
"major": 1,
"minor": 0,
"patch": 3
}
},
"md-agreement-atr": {
"builder_name": "tid8",
"citation": "@inproceedings{leonardelli-etal-2021-agreeing,\n title = \"Agreeing to Disagree: Annotating Offensive Language Datasets with Annotators{'} Disagreement\",\n author = \"Leonardelli, Elisa and. Menini, Stefano and\n Palmero Aprosio, Alessio and\n Guerini, Marco and\n Tonelli, Sara\",\n booktitle = \"Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing\",\n month = nov,\n year = \"2021\",\n address = \"Online and Punta Cana, Dominican Republic\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2021.emnlp-main.822\",\n pages = \"10528--10539\",\n}\n@inproceedings{deng2023tid8,\n title={You Are What You Annotate: Towards Better Models through Annotator Representations},\n author={Deng, Naihao and Liu, Siyang and Zhang, Frederick Xinliang and Wu, Winston and Wang, Lu and Mihalcea, Rada},\n booktitle={Findings of EMNLP 2023},\n year={2023}\n}\nNote that each TID-8 dataset has its own citation. Please see the source to\nget the correct citation for each contained dataset.\n",
"config_name": "md-agreement-atr",
"dataset_name": "tid8",
"dataset_size": 12734106,
"description": "TID-8 is a new benchmark focused on the task of letting models learn from data that has inherent disagreement.\n\nMultiDomain Agreement (Leonardelli\net al., 2021) is a hate speech classification dataset of\nEnglish tweets from three domains of Black Lives\nMatter, Election, and Covid-19, with a particular\nfocus on tweets that potentially leads to disagree-\nment.\nAnnotator Split:\nWe split annotators into train and test set.\n\nIn other words, a different set of annotators would appear in train and test sets.\n\nWe split the data into train and test set for convenience, you may consider\nfurther splitting the train set into a train and val set for performance validation.\n",
"download_checksums": {
"https://raw.githubusercontent.com/MichiganNLP/tid8-dataset/main/huggingface-data/md-agreement-atr.zip": {
"num_bytes": 4121140,
"checksum": null
}
},
"download_size": 4121140,
"features": {
"task": {
"dtype": "string",
"_type": "Value"
},
"original_id": {
"dtype": "string",
"_type": "Value"
},
"domain": {
"dtype": "string",
"_type": "Value"
},
"question": {
"dtype": "string",
"_type": "Value"
},
"uid": {
"dtype": "string",
"_type": "Value"
},
"id": {
"dtype": "int32",
"_type": "Value"
},
"annotator_id": {
"dtype": "string",
"_type": "Value"
},
"answer": {
"dtype": "string",
"_type": "Value"
},
"answer_label": {
"names": [
"offensive_speech",
"not_offensive_speech"
],
"_type": "ClassLabel"
}
},
"homepage": "https://le-wi-di.github.io/",
"license": "",
"size_in_bytes": 16855246,
"splits": {
"train": {
"name": "train",
"num_bytes": 8777085,
"num_examples": 37077,
"dataset_name": "tid8"
},
"test": {
"name": "test",
"num_bytes": 3957021,
"num_examples": 16688,
"dataset_name": "tid8"
}
},
"version": {
"version_str": "1.0.3",
"major": 1,
"minor": 0,
"patch": 3
}
},
"pejorative-ann": {
"builder_name": "tid8",
"citation": "@inproceedings{dinu-etal-2021-computational-exploration,\n title = \"A Computational Exploration of Pejorative Language in Social Media\",\n author = \"Dinu, Liviu P. and\n Iordache, Ioan-Bogdan and\n Uban, Ana Sabina and\n Zampieri, Marcos\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2021\",\n month = nov,\n year = \"2021\",\n address = \"Punta Cana, Dominican Republic\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2021.findings-emnlp.296\",\n doi = \"10.18653/v1/2021.findings-emnlp.296\",\n pages = \"3493--3498\"\n}\n@inproceedings{deng2023tid8,\n title={You Are What You Annotate: Towards Better Models through Annotator Representations},\n author={Deng, Naihao and Liu, Siyang and Zhang, Frederick Xinliang and Wu, Winston and Wang, Lu and Mihalcea, Rada},\n booktitle={Findings of EMNLP 2023},\n year={2023}\n}\nNote that each TID-8 dataset has its own citation. Please see the source to\nget the correct citation for each contained dataset.\n",
"config_name": "pejorative-ann",
"dataset_name": "tid8",
"dataset_size": 501628,
"description": "TID-8 is a new benchmark focused on the task of letting models learn from data that has inherent disagreement.\n\nPejorative (Dinu et al., 2021) classifies\nwhether Tweets contain words that are used pejora-\ntively. By definition, pejorative words are words or\nphrases that have negative connotations or that are\nintended to disparage or belittle.\nAnnotation Split:\nWe split the annotations for each annotator into train and test set.\n\nIn other words, the same set of annotators appear in both train, (val),\nand test sets.\n\nFor datasets that have splits originally, we follow the original split and remove\ndatapoints in test sets that are annotated by an annotator who is not in\nthe training set.\n\nFor datasets that do not have splits originally, we split the data into \ntrain and test set for convenience, you may further split the train set\ninto a train and val set.\n",
"download_checksums": {
"https://raw.githubusercontent.com/MichiganNLP/tid8-dataset/main/huggingface-data/pejorative-ann.zip": {
"num_bytes": 142769,
"checksum": null
}
},
"download_size": 142769,
"features": {
"pejor_word": {
"dtype": "string",
"_type": "Value"
},
"word_definition": {
"dtype": "string",
"_type": "Value"
},
"annotator-1": {
"dtype": "string",
"_type": "Value"
},
"annotator-2": {
"dtype": "string",
"_type": "Value"
},
"annotator-3": {
"dtype": "string",
"_type": "Value"
},
"question": {
"dtype": "string",
"_type": "Value"
},
"uid": {
"dtype": "string",
"_type": "Value"
},
"id": {
"dtype": "int32",
"_type": "Value"
},
"annotator_id": {
"dtype": "string",
"_type": "Value"
},
"answer": {
"dtype": "string",
"_type": "Value"
},
"answer_label": {
"names": [
"pejorative",
"non-pejorative",
"undecided"
],
"_type": "ClassLabel"
}
},
"homepage": "https://nlp.unibuc.ro/resources.html",
"license": "",
"size_in_bytes": 644397,
"splits": {
"train": {
"name": "train",
"num_bytes": 350734,
"num_examples": 1535,
"dataset_name": "tid8"
},
"test": {
"name": "test",
"num_bytes": 150894,
"num_examples": 659,
"dataset_name": "tid8"
}
},
"version": {
"version_str": "1.0.3",
"major": 1,
"minor": 0,
"patch": 3
}
},
"pejorative-atr": {
"builder_name": "tid8",
"citation": "@inproceedings{dinu-etal-2021-computational-exploration,\n title = \"A Computational Exploration of Pejorative Language in Social Media\",\n author = \"Dinu, Liviu P. and\n Iordache, Ioan-Bogdan and\n Uban, Ana Sabina and\n Zampieri, Marcos\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2021\",\n month = nov,\n year = \"2021\",\n address = \"Punta Cana, Dominican Republic\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2021.findings-emnlp.296\",\n doi = \"10.18653/v1/2021.findings-emnlp.296\",\n pages = \"3493--3498\"\n}\n@inproceedings{deng2023tid8,\n title={You Are What You Annotate: Towards Better Models through Annotator Representations},\n author={Deng, Naihao and Liu, Siyang and Zhang, Frederick Xinliang and Wu, Winston and Wang, Lu and Mihalcea, Rada},\n booktitle={Findings of EMNLP 2023},\n year={2023}\n}\nNote that each TID-8 dataset has its own citation. Please see the source to\nget the correct citation for each contained dataset.\n",
"config_name": "pejorative-atr",
"dataset_name": "tid8",
"dataset_size": 501628,
"description": "TID-8 is a new benchmark focused on the task of letting models learn from data that has inherent disagreement.\n\nPejorative (Dinu et al., 2021) classifies\nwhether Tweets contain words that are used pejora-\ntively. By definition, pejorative words are words or\nphrases that have negative connotations or that are\nintended to disparage or belittle.\nAnnotator Split:\nWe split annotators into train and test set.\n\nIn other words, a different set of annotators would appear in train and test sets.\n\nWe split the data into train and test set for convenience, you may consider\nfurther splitting the train set into a train and val set for performance validation.\n",
"download_checksums": {
"https://raw.githubusercontent.com/MichiganNLP/tid8-dataset/main/huggingface-data/pejorative-atr.zip": {
"num_bytes": 135023,
"checksum": null
}
},
"download_size": 135023,
"features": {
"pejor_word": {
"dtype": "string",
"_type": "Value"
},
"word_definition": {
"dtype": "string",
"_type": "Value"
},
"annotator-1": {
"dtype": "string",
"_type": "Value"
},
"annotator-2": {
"dtype": "string",
"_type": "Value"
},
"annotator-3": {
"dtype": "string",
"_type": "Value"
},
"question": {
"dtype": "string",
"_type": "Value"
},
"uid": {
"dtype": "string",
"_type": "Value"
},
"id": {
"dtype": "int32",
"_type": "Value"
},
"annotator_id": {
"dtype": "string",
"_type": "Value"
},
"answer": {
"dtype": "string",
"_type": "Value"
},
"answer_label": {
"names": [
"pejorative",
"non-pejorative",
"undecided"
],
"_type": "ClassLabel"
}
},
"homepage": "https://nlp.unibuc.ro/resources.html",
"license": "",
"size_in_bytes": 636651,
"splits": {
"train": {
"name": "train",
"num_bytes": 254138,
"num_examples": 1112,
"dataset_name": "tid8"
},
"test": {
"name": "test",
"num_bytes": 247490,
"num_examples": 1082,
"dataset_name": "tid8"
}
},
"version": {
"version_str": "1.0.3",
"major": 1,
"minor": 0,
"patch": 3
}
},
"sentiment-ann": {
"builder_name": "tid8",
"citation": "@inproceedings{diaz2018addressing,\n title={Addressing age-related bias in sentiment analysis},\n author={D{'\\i}az, Mark and Johnson, Isaac and Lazar, Amanda and Piper, Anne Marie and Gergle, Darren},\n booktitle={Proceedings of the 2018 chi conference on human factors in computing systems},\n pages={1--14},\n year={2018}\n}\n@inproceedings{deng2023tid8,\n title={You Are What You Annotate: Towards Better Models through Annotator Representations},\n author={Deng, Naihao and Liu, Siyang and Zhang, Frederick Xinliang and Wu, Winston and Wang, Lu and Mihalcea, Rada},\n booktitle={Findings of EMNLP 2023},\n year={2023}\n}\nNote that each TID-8 dataset has its own citation. Please see the source to\nget the correct citation for each contained dataset.\n",
"config_name": "sentiment-ann",
"dataset_name": "tid8",
"dataset_size": 9585346,
"description": "TID-8 is a new benchmark focused on the task of letting models learn from data that has inherent disagreement.\n\nSentiment Analysis (D\u00edaz et al., 2018) is a\nsentiment classification dataset originally used to\ndetect age-related sentiments.\nAnnotation Split:\nWe split the annotations for each annotator into train and test set.\n\nIn other words, the same set of annotators appear in both train, (val),\nand test sets.\n\nFor datasets that have splits originally, we follow the original split and remove\ndatapoints in test sets that are annotated by an annotator who is not in\nthe training set.\n\nFor datasets that do not have splits originally, we split the data into \ntrain and test set for convenience, you may further split the train set\ninto a train and val set.\n",
"download_checksums": {
"https://raw.githubusercontent.com/MichiganNLP/tid8-dataset/main/huggingface-data/sentiment-ann.zip": {
"num_bytes": 3371941,
"checksum": null
}
},
"download_size": 3371941,
"features": {
"question": {
"dtype": "string",
"_type": "Value"
},
"uid": {
"dtype": "string",
"_type": "Value"
},
"id": {
"dtype": "int32",
"_type": "Value"
},
"annotator_id": {
"dtype": "string",
"_type": "Value"
},
"answer": {
"dtype": "string",
"_type": "Value"
},
"answer_label": {
"names": [
"Neutral",
"Somewhat positive",
"Very negative",
"Somewhat negative",
"Very positive"
],
"_type": "ClassLabel"
}
},
"homepage": "https://dataverse.harvard.edu/dataverse/algorithm-age-bias",
"license": "",
"size_in_bytes": 12957287,
"splits": {
"train": {
"name": "train",
"num_bytes": 9350333,
"num_examples": 59235,
"dataset_name": "tid8"
},
"test": {
"name": "test",
"num_bytes": 235013,
"num_examples": 1419,
"dataset_name": "tid8"
}
},
"version": {
"version_str": "1.0.3",
"major": 1,
"minor": 0,
"patch": 3
}
},
"sentiment-atr": {
"builder_name": "tid8",
"citation": "@inproceedings{diaz2018addressing,\n title={Addressing age-related bias in sentiment analysis},\n author={D{'\\i}az, Mark and Johnson, Isaac and Lazar, Amanda and Piper, Anne Marie and Gergle, Darren},\n booktitle={Proceedings of the 2018 chi conference on human factors in computing systems},\n pages={1--14},\n year={2018}\n}\n@inproceedings{deng2023tid8,\n title={You Are What You Annotate: Towards Better Models through Annotator Representations},\n author={Deng, Naihao and Liu, Siyang and Zhang, Frederick Xinliang and Wu, Winston and Wang, Lu and Mihalcea, Rada},\n booktitle={Findings of EMNLP 2023},\n year={2023}\n}\nNote that each TID-8 dataset has its own citation. Please see the source to\nget the correct citation for each contained dataset.\n",
"config_name": "sentiment-atr",
"dataset_name": "tid8",
"dataset_size": 9585346,
"description": "TID-8 is a new benchmark focused on the task of letting models learn from data that has inherent disagreement.\n\nSentiment Analysis (D\u00edaz et al., 2018) is a\nsentiment classification dataset originally used to\ndetect age-related sentiments.\nAnnotator Split:\nWe split annotators into train and test set.\n\nIn other words, a different set of annotators would appear in train and test sets.\n\nWe split the data into train and test set for convenience, you may consider\nfurther splitting the train set into a train and val set for performance validation.\n",
"download_checksums": {
"https://raw.githubusercontent.com/MichiganNLP/tid8-dataset/main/huggingface-data/sentiment-atr.zip": {
"num_bytes": 3352022,
"checksum": null
}
},
"download_size": 3352022,
"features": {
"question": {
"dtype": "string",
"_type": "Value"
},
"uid": {
"dtype": "string",
"_type": "Value"
},
"id": {
"dtype": "int32",
"_type": "Value"
},
"annotator_id": {
"dtype": "string",
"_type": "Value"
},
"answer": {
"dtype": "string",
"_type": "Value"
},
"answer_label": {
"names": [
"Neutral",
"Somewhat positive",
"Very negative",
"Somewhat negative",
"Very positive"
],
"_type": "ClassLabel"
}
},
"homepage": "https://dataverse.harvard.edu/dataverse/algorithm-age-bias",
"license": "",
"size_in_bytes": 12937368,
"splits": {
"train": {
"name": "train",
"num_bytes": 6712084,
"num_examples": 42439,
"dataset_name": "tid8"
},
"test": {
"name": "test",
"num_bytes": 2873262,
"num_examples": 18215,
"dataset_name": "tid8"
}
},
"version": {
"version_str": "1.0.3",
"major": 1,
"minor": 0,
"patch": 3
}
}
}
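For reference, a minimal sketch of how one of the configurations described above could be loaded and further split with the Hugging Face datasets library. The repository id "dnaihao/TID-8", the chosen config name, and the validation split size are illustrative assumptions inferred from this page, not values confirmed by this file.

from datasets import load_dataset

# Each config name pairs a source dataset with a split scheme:
# "-ann" = annotation split, "-atr" = annotator split (e.g. "commitmentbank-ann").
# NOTE: the repository id below is an assumption inferred from this page.
ds = load_dataset("dnaihao/TID-8", "commitmentbank-ann")

# Most configs ship only train/test splits; per the descriptions, a validation
# set can be carved out of the train split if needed.
splits = ds["train"].train_test_split(test_size=0.1, seed=42)
train_set, val_set = splits["train"], splits["test"]

print(train_set.features["answer_label"].names)       # ClassLabel names for this config
print(len(train_set), len(val_set), len(ds["test"]))  # split sizes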