{
"emoji": {
"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n",
"citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n",
"homepage": "https://github.com/cardiffnlp/tweeteval",
"license": "",
"features": {
"text": {
"dtype": "string",
"_type": "Value"
},
"label": {
"names": [
"\u2764",
"\ud83d\ude0d",
"\ud83d\ude02",
"\ud83d\udc95",
"\ud83d\udd25",
"\ud83d\ude0a",
"\ud83d\ude0e",
"\u2728",
"\ud83d\udc99",
"\ud83d\ude18",
"\ud83d\udcf7",
"\ud83c\uddfa\ud83c\uddf8",
"\u2600",
"\ud83d\udc9c",
"\ud83d\ude09",
"\ud83d\udcaf",
"\ud83d\ude01",
"\ud83c\udf84",
"\ud83d\udcf8",
"\ud83d\ude1c"
],
"_type": "ClassLabel"
}
},
"builder_name": "tweet_eval",
"dataset_name": "tweet_eval",
"config_name": "emoji",
"version": {
"version_str": "1.1.0",
"major": 1,
"minor": 1,
"patch": 0
},
"splits": {
"train": {
"name": "train",
"num_bytes": 3803167,
"num_examples": 45000,
"dataset_name": null
},
"test": {
"name": "test",
"num_bytes": 4255901,
"num_examples": 50000,
"dataset_name": null
},
"validation": {
"name": "validation",
"num_bytes": 396079,
"num_examples": 5000,
"dataset_name": null
}
},
"download_size": 5939308,
"dataset_size": 8455147,
"size_in_bytes": 14394455
},
"emotion": {
"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n",
"citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n",
"homepage": "https://github.com/cardiffnlp/tweeteval",
"license": "",
"features": {
"text": {
"dtype": "string",
"_type": "Value"
},
"label": {
"names": [
"anger",
"joy",
"optimism",
"sadness"
],
"_type": "ClassLabel"
}
},
"builder_name": "tweet_eval",
"dataset_name": "tweet_eval",
"config_name": "emotion",
"version": {
"version_str": "1.1.0",
"major": 1,
"minor": 1,
"patch": 0
},
"splits": {
"train": {
"name": "train",
"num_bytes": 338871,
"num_examples": 3257,
"dataset_name": null
},
"test": {
"name": "test",
"num_bytes": 146645,
"num_examples": 1421,
"dataset_name": null
},
"validation": {
"name": "validation",
"num_bytes": 38273,
"num_examples": 374,
"dataset_name": null
}
},
"download_size": 367016,
"dataset_size": 523789,
"size_in_bytes": 890805
},
"hate": {
"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n",
"citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n",
"homepage": "https://github.com/cardiffnlp/tweeteval",
"license": "",
"features": {
"text": {
"dtype": "string",
"_type": "Value"
},
"label": {
"names": [
"non-hate",
"hate"
],
"_type": "ClassLabel"
}
},
"builder_name": "tweet_eval",
"dataset_name": "tweet_eval",
"config_name": "hate",
"version": {
"version_str": "1.1.0",
"major": 1,
"minor": 1,
"patch": 0
},
"splits": {
"train": {
"name": "train",
"num_bytes": 1223650,
"num_examples": 9000,
"dataset_name": null
},
"test": {
"name": "test",
"num_bytes": 428934,
"num_examples": 2970,
"dataset_name": null
},
"validation": {
"name": "validation",
"num_bytes": 154144,
"num_examples": 1000,
"dataset_name": null
}
},
"download_size": 1196346,
"dataset_size": 1806728,
"size_in_bytes": 3003074
},
"irony": {
"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n",
"citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n",
"homepage": "https://github.com/cardiffnlp/tweeteval",
"license": "",
"features": {
"text": {
"dtype": "string",
"_type": "Value"
},
"label": {
"names": [
"non_irony",
"irony"
],
"_type": "ClassLabel"
}
},
"builder_name": "tweet_eval",
"dataset_name": "tweet_eval",
"config_name": "irony",
"version": {
"version_str": "1.1.0",
"major": 1,
"minor": 1,
"patch": 0
},
"splits": {
"train": {
"name": "train",
"num_bytes": 259187,
"num_examples": 2862,
"dataset_name": null
},
"test": {
"name": "test",
"num_bytes": 75897,
"num_examples": 784,
"dataset_name": null
},
"validation": {
"name": "validation",
"num_bytes": 86017,
"num_examples": 955,
"dataset_name": null
}
},
"download_size": 297647,
"dataset_size": 421101,
"size_in_bytes": 718748
},
"offensive": {
"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n",
"citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n",
"homepage": "https://github.com/cardiffnlp/tweeteval",
"license": "",
"features": {
"text": {
"dtype": "string",
"_type": "Value"
},
"label": {
"names": [
"non-offensive",
"offensive"
],
"_type": "ClassLabel"
}
},
"builder_name": "tweet_eval",
"dataset_name": "tweet_eval",
"config_name": "offensive",
"version": {
"version_str": "1.1.0",
"major": 1,
"minor": 1,
"patch": 0
},
"splits": {
"train": {
"name": "train",
"num_bytes": 1648061,
"num_examples": 11916,
"dataset_name": null
},
"test": {
"name": "test",
"num_bytes": 135473,
"num_examples": 860,
"dataset_name": null
},
"validation": {
"name": "validation",
"num_bytes": 192417,
"num_examples": 1324,
"dataset_name": null
}
},
"download_size": 1234528,
"dataset_size": 1975951,
"size_in_bytes": 3210479
},
"sentiment": {
"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n",
"citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n",
"homepage": "https://github.com/cardiffnlp/tweeteval",
"license": "",
"features": {
"text": {
"dtype": "string",
"id": null,
"_type": "Value"
},
"label": {
"num_classes": 3,
"names": [
"negative",
"neutral",
"positive"
],
"names_file": null,
"id": null,
"_type": "ClassLabel"
}
},
"post_processed": null,
"supervised_keys": null,
"builder_name": " tweet_eval",
"config_name": "sentiment",
"version": {
"version_str": "1.1.0",
"description": null,
"major": 1,
"minor": 1,
"patch": 0
},
"splits": {
"train": {
"name": "train",
"num_bytes": 5425142,
"num_examples": 45615,
"dataset_name": " tweet_eval"
},
"test": {
"name": "test",
"num_bytes": 1279548,
"num_examples": 12284,
"dataset_name": " tweet_eval"
},
"validation": {
"name": "validation",
"num_bytes": 239088,
"num_examples": 2000,
"dataset_name": " tweet_eval"
}
},
"download_checksums": {
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/sentiment/train_text.txt": {
"num_bytes": 4970029,
"checksum": "368f01052ea6fd8ffc408a2a2e6ac9669e31542581a0396ef16591ea26eb98a6"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/sentiment/train_labels.txt": {
"num_bytes": 91230,
"checksum": "122bfb1732fb6995b0e5c5f726c0ba457c469c3b6e60513007ce5037f23e65d4"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/sentiment/test_text.txt": {
"num_bytes": 1156877,
"checksum": "09a93a55c63fd93f97485ef7302889d7edb4091cd49733aa37da094f0bfa0675"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/sentiment/test_labels.txt": {
"num_bytes": 24568,
"checksum": "6afb4afe9374d1f983bcf9a7c79b108d0f37fdf020a83f30488309bed215db9d"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/sentiment/val_text.txt": {
"num_bytes": 219137,
"checksum": "e5b021e6fc45064c260b09814b803d8f56cada519c4d952d72f43d48a350a964"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/sentiment/val_labels.txt": {
"num_bytes": 4000,
"checksum": "b4566926c72e2e4e2916c864def94e76c4cdde52446af2c7ba4fc2006e057e51"
}
},
"download_size": 6465841,
"post_processing_size": null,
"dataset_size": 6943778,
"size_in_bytes": 13409619
},
"stance_abortion": {
"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n",
"citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n",
"homepage": "https://github.com/cardiffnlp/tweeteval",
"license": "",
"features": {
"text": {
"dtype": "string",
"id": null,
"_type": "Value"
},
"label": {
"num_classes": 3,
"names": [
"none",
"against",
"favor"
],
"names_file": null,
"id": null,
"_type": "ClassLabel"
}
},
"post_processed": null,
"supervised_keys": null,
"builder_name": " tweet_eval",
"config_name": "stance_abortion",
"version": {
"version_str": "1.1.0",
"description": null,
"major": 1,
"minor": 1,
"patch": 0
},
"splits": {
"train": {
"name": "train",
"num_bytes": 68698,
"num_examples": 587,
"dataset_name": " tweet_eval"
},
"test": {
"name": "test",
"num_bytes": 33175,
"num_examples": 280,
"dataset_name": " tweet_eval"
},
"validation": {
"name": "validation",
"num_bytes": 7661,
"num_examples": 66,
"dataset_name": " tweet_eval"
}
},
"download_checksums": {
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/abortion/train_text.txt": {
"num_bytes": 62828,
"checksum": "a421d5b8fd9f972970b9275b83f65745bf81986d2a412b4caa2ba071f3efa916"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/abortion/train_labels.txt": {
"num_bytes": 1174,
"checksum": "e6786a594bd9a083c524a0f420c690351140b52af288f487cb4772d29675b014"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/abortion/test_text.txt": {
"num_bytes": 30371,
"checksum": "bf0e16a0b8ca4cf0ab90efbc560db3151c288fc842f5e3c6554e8589d521556a"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/abortion/test_labels.txt": {
"num_bytes": 560,
"checksum": "c90e6d36d863f876d6661620d37b613b4b07858a5277c8d6623713ee59ca451c"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/abortion/val_text.txt": {
"num_bytes": 6997,
"checksum": "0428ab3f2894936f2445a9020763c2bd19ed42986872168bb65886dede5843fd"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/abortion/val_labels.txt": {
"num_bytes": 132,
"checksum": "8df57a50823d5f3683ecf75d824a42e3b08eb52e25e3e2d6928f523097a0c050"
}
},
"download_size": 102062,
"post_processing_size": null,
"dataset_size": 109534,
"size_in_bytes": 211596
},
"stance_atheism": {
"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n",
"citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n",
"homepage": "https://github.com/cardiffnlp/tweeteval",
"license": "",
"features": {
"text": {
"dtype": "string",
"id": null,
"_type": "Value"
},
"label": {
"num_classes": 3,
"names": [
"none",
"against",
"favor"
],
"names_file": null,
"id": null,
"_type": "ClassLabel"
}
},
"post_processed": null,
"supervised_keys": null,
"builder_name": " tweet_eval",
"config_name": "stance_atheism",
"version": {
"version_str": "1.1.0",
"description": null,
"major": 1,
"minor": 1,
"patch": 0
},
"splits": {
"train": {
"name": "train",
"num_bytes": 54779,
"num_examples": 461,
"dataset_name": " tweet_eval"
},
"test": {
"name": "test",
"num_bytes": 25720,
"num_examples": 220,
"dataset_name": " tweet_eval"
},
"validation": {
"name": "validation",
"num_bytes": 6324,
"num_examples": 52,
"dataset_name": " tweet_eval"
}
},
"download_checksums": {
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/atheism/train_text.txt": {
"num_bytes": 50165,
"checksum": "0e82f1d4a16d79a38a68aee761762cf8a846bc8f7f9395670ca44e2ecf2f58f7"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/atheism/train_labels.txt": {
"num_bytes": 922,
"checksum": "a764aac1a75ccb32c4ffc4c03c66dc365cb50f013d3e94549bf775636cbc8373"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/atheism/test_text.txt": {
"num_bytes": 23516,
"checksum": "16c5336b2cba606ca63a6afcc50241be63a8fccf021628c6505449439b9d54b3"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/atheism/test_labels.txt": {
"num_bytes": 440,
"checksum": "4ef7c9398d265cfac625092c834e43cef9da9cb318e563493abb64f65dfe1b52"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/atheism/val_text.txt": {
"num_bytes": 5800,
"checksum": "5fe14c4c01f87a45dba640dddbb1d1909a893f9565f159c48fa1ba35bb46c209"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/atheism/val_labels.txt": {
"num_bytes": 104,
"checksum": "638095b3582f927fd1481cdb8d1f9f670f8d27880baf32c0b26c5946fd8f8292"
}
},
"download_size": 80947,
"post_processing_size": null,
"dataset_size": 86823,
"size_in_bytes": 167770
},
"stance_climate": {
"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n",
"citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n",
"homepage": "https://github.com/cardiffnlp/tweeteval",
"license": "",
"features": {
"text": {
"dtype": "string",
"id": null,
"_type": "Value"
},
"label": {
"num_classes": 3,
"names": [
"none",
"against",
"favor"
],
"names_file": null,
"id": null,
"_type": "ClassLabel"
}
},
"post_processed": null,
"supervised_keys": null,
"builder_name": " tweet_eval",
"config_name": "stance_climate",
"version": {
"version_str": "1.1.0",
"description": null,
"major": 1,
"minor": 1,
"patch": 0
},
"splits": {
"train": {
"name": "train",
"num_bytes": 40253,
"num_examples": 355,
"dataset_name": " tweet_eval"
},
"test": {
"name": "test",
"num_bytes": 19929,
"num_examples": 169,
"dataset_name": " tweet_eval"
},
"validation": {
"name": "validation",
"num_bytes": 4805,
"num_examples": 40,
"dataset_name": " tweet_eval"
}
},
"download_checksums": {
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/climate/train_text.txt": {
"num_bytes": 36699,
"checksum": "4803211832d318026323a8e5014cff1b95e1c8c3854378101e5d1a8c82582eb7"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/climate/train_labels.txt": {
"num_bytes": 710,
"checksum": "d6274f55bc95f5a7f2ae591b886c1414a7664aaf4e0c609f4ba6cf377929af18"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/climate/test_text.txt": {
"num_bytes": 18235,
"checksum": "41ee8ee2ad3c36e0629654fdb271f37775197c79be8b299adbeadd2003b63c53"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/climate/test_labels.txt": {
"num_bytes": 338,
"checksum": "193c9f2358f61d9efe558324ec89ecaf08e600a44b68128f47838c01d9f98dfd"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/climate/val_text.txt": {
"num_bytes": 4401,
"checksum": "fc5714703add266801ee2fd98296ea20ec0879e89cdb9f906d9812d9f640f2ba"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/climate/val_labels.txt": {
"num_bytes": 80,
"checksum": "0cb133ab9b137292f075210db45f7e293dc52798a4e21e59037bfcfe66c97aa6"
}
},
"download_size": 60463,
"post_processing_size": null,
"dataset_size": 64987,
"size_in_bytes": 125450
},
"stance_feminist": {
"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n",
"citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n",
"homepage": "https://github.com/cardiffnlp/tweeteval",
"license": "",
"features": {
"text": {
"dtype": "string",
"id": null,
"_type": "Value"
},
"label": {
"num_classes": 3,
"names": [
"none",
"against",
"favor"
],
"names_file": null,
"id": null,
"_type": "ClassLabel"
}
},
"post_processed": null,
"supervised_keys": null,
"builder_name": " tweet_eval",
"config_name": "stance_feminist",
"version": {
"version_str": "1.1.0",
"description": null,
"major": 1,
"minor": 1,
"patch": 0
},
"splits": {
"train": {
"name": "train",
"num_bytes": 70513,
"num_examples": 597,
"dataset_name": " tweet_eval"
},
"test": {
"name": "test",
"num_bytes": 33309,
"num_examples": 285,
"dataset_name": " tweet_eval"
},
"validation": {
"name": "validation",
"num_bytes": 8039,
"num_examples": 67,
"dataset_name": " tweet_eval"
}
},
"download_checksums": {
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/feminist/train_text.txt": {
"num_bytes": 64539,
"checksum": "c176e6663973c8e78bfa92ba1e8874a70cc5358567d71584a90943bc6525eaab"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/feminist/train_labels.txt": {
"num_bytes": 1194,
"checksum": "abd4f196d801423bb0daba8c0ecf5b3efba1f10e8f410c3dfa360b50c8b9c685"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/feminist/test_text.txt": {
"num_bytes": 30455,
"checksum": "1bfdbdc2af64fd62dcc775d1288e192ac8ff805ef27ccf3aaac54a98616eefda"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/feminist/test_labels.txt": {
"num_bytes": 570,
"checksum": "ddbde6d253ee47c5d5ef8bc5386270fde45cf088d3be70bba9c382b8a024897a"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/feminist/val_text.txt": {
"num_bytes": 7365,
"checksum": "3518b2ddcf696626a7243d7cea720a975718c7a52a5a086931be87897c1de58b"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/feminist/val_labels.txt": {
"num_bytes": 134,
"checksum": "399e0d468d0e4ead7a445f69efdf35876c835acf4cefc00a16f451a5d42e5c13"
}
},
"download_size": 104257,
"post_processing_size": null,
"dataset_size": 111861,
"size_in_bytes": 216118
},
"stance_hillary": {
"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n",
"citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n",
"homepage": "https://github.com/cardiffnlp/tweeteval",
"license": "",
"features": {
"text": {
"dtype": "string",
"id": null,
"_type": "Value"
},
"label": {
"num_classes": 3,
"names": [
"none",
"against",
"favor"
],
"names_file": null,
"id": null,
"_type": "ClassLabel"
}
},
"post_processed": null,
"supervised_keys": null,
"builder_name": " tweet_eval",
"config_name": "stance_hillary",
"version": {
"version_str": "1.1.0",
"description": null,
"major": 1,
"minor": 1,
"patch": 0
},
"splits": {
"train": {
"name": "train",
"num_bytes": 69600,
"num_examples": 620,
"dataset_name": " tweet_eval"
},
"test": {
"name": "test",
"num_bytes": 34491,
"num_examples": 295,
"dataset_name": " tweet_eval"
},
"validation": {
"name": "validation",
"num_bytes": 7536,
"num_examples": 69,
"dataset_name": " tweet_eval"
}
},
"download_checksums": {
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/hillary/train_text.txt": {
"num_bytes": 63398,
"checksum": "0bd735de895cb74d63c224e64e3d955cac99be97aa225f803fe4d2f5978a2c99"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/hillary/train_labels.txt": {
"num_bytes": 1240,
"checksum": "0ea5753d13a717a9e91581d1d89c0b5206c8f905f0a717b2b27d02dbf419250d"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/hillary/test_text.txt": {
"num_bytes": 31537,
"checksum": "5c4e020285a62cfd88f264849e1db242ded356c171b1a68dd0050b76635053aa"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/hillary/test_labels.txt": {
"num_bytes": 590,
"checksum": "068468f6a72b85dfb65bf10e45f2453fa082d1ea9d7a40e7f560d5b6d75027f3"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/hillary/val_text.txt": {
"num_bytes": 6842,
"checksum": "9714b7dcc8617e095433d7b63df8aa155eb84216b9ac9195105ab83d85cd248d"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/hillary/val_labels.txt": {
"num_bytes": 138,
"checksum": "e5d44c771b7349a4a74309f56ca072fdf8f1c015068d519ca2ed3a931c833606"
}
},
"download_size": 103745,
"post_processing_size": null,
"dataset_size": 111627,
"size_in_bytes": 215372
}
}