Sub-tasks: fact-checking
Languages: English
Multilinguality: monolingual
Size Categories: 10K<n<100K
Language Creators: found
Annotations Creators: crowdsourced
Tags: stance-detection
Commit 98d0ff2 (1 parent: 6dd037e), committed by mkon

test split done

Files changed (2):
  1. dataset_infos.json +1 -1
  2. rumoureval_2019.py +4 -3
dataset_infos.json CHANGED
@@ -1 +1 @@
- {"train": {"description": "This new dataset is designed to solve this great NLP task and is crafted with a lot of care.\n", "citation": "@InProceedings{huggingface:dataset,\ntitle = {A great new dataset},\nauthor={huggingface, Inc.\n},\nyear={2020}\n}\n", "homepage": "", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "source_text": {"dtype": "string", "id": null, "_type": "Value"}, "reply_text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 4, "names": ["support", "query", "deny", "comment"], "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "rumour_eval2019", "config_name": "train", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1242200, "num_examples": 4879, "dataset_name": "rumour_eval2019"}}, "download_checksums": {"rumoureval2019_train.csv": {"num_bytes": 1203917, "checksum": "134c036e34da708f0edb22b3cc688054d6395d1669eef78e4afa0fd9a4ed4c43"}}, "download_size": 1203917, "post_processing_size": null, "dataset_size": 1242200, "size_in_bytes": 2446117}, "RumourEval2019": {"description": "This new dataset is designed to solve this great NLP task and is crafted with a lot of care.\n", "citation": "@InProceedings{huggingface:dataset,\ntitle = {A great new dataset},\nauthor={huggingface, Inc.\n},\nyear={2020}\n}\n", "homepage": "", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "source_text": {"dtype": "string", "id": null, "_type": "Value"}, "reply_text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 4, "names": ["support", "query", "deny", "comment"], "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "rumour_eval2019", "config_name": "RumourEval2019", "version": {"version_str": "0.9.0", "description": null, "major": 0, "minor": 9, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1242200, "num_examples": 4879, "dataset_name": "rumour_eval2019"}, "validation": {"name": "validation", "num_bytes": 412707, "num_examples": 1440, "dataset_name": "rumour_eval2019"}}, "download_checksums": {"rumoureval2019_train.csv": {"num_bytes": 1203917, "checksum": "134c036e34da708f0edb22b3cc688054d6395d1669eef78e4afa0fd9a4ed4c43"}, "rumoureval2019_val.csv": {"num_bytes": 402303, "checksum": "6cc859c2eff320ba002866e0b78f7e956b78d58e9e3a7843798b2dd9c23de201"}}, "download_size": 1606220, "post_processing_size": null, "dataset_size": 1654907, "size_in_bytes": 3261127}}
+ {"RumourEval2019": {"description": "This new dataset is designed to solve this great NLP task and is crafted with a lot of care.\n", "citation": "@InProceedings{huggingface:dataset,\ntitle = {A great new dataset},\nauthor={huggingface, Inc.\n},\nyear={2020}\n}\n", "homepage": "", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "source_text": {"dtype": "string", "id": null, "_type": "Value"}, "reply_text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 4, "names": ["support", "query", "deny", "comment"], "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "rumour_eval2019", "config_name": "RumourEval2019", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1242200, "num_examples": 4879, "dataset_name": "rumour_eval2019"}, "validation": {"name": "validation", "num_bytes": 412707, "num_examples": 1440, "dataset_name": "rumour_eval2019"}, "test": {"name": "test", "num_bytes": 491431, "num_examples": 1675, "dataset_name": "rumour_eval2019"}}, "download_checksums": {"rumoureval2019_train.csv": {"num_bytes": 1203917, "checksum": "134c036e34da708f0edb22b3cc688054d6395d1669eef78e4afa0fd9a4ed4c43"}, "rumoureval2019_val.csv": {"num_bytes": 402303, "checksum": "6cc859c2eff320ba002866e0b78f7e956b78d58e9e3a7843798b2dd9c23de201"}, "rumoureval2019_test.csv": {"num_bytes": 479250, "checksum": "7d103bfb55cdef3b0d26c481ceb772159ae824aa15bf26e8b26dc87a58c55508"}}, "download_size": 2085470, "post_processing_size": null, "dataset_size": 2146338, "size_in_bytes": 4231808}}
rumoureval_2019.py CHANGED
@@ -53,7 +53,7 @@ class RumourEval2019Config(datasets.BuilderConfig):
 class RumourEval2019(datasets.GeneratorBasedBuilder):
     """RumourEval2019 Stance Detection Dataset formatted in triples of (source_text, reply_text, label)"""
 
-    VERSION = datasets.Version("0.9.0")
+    VERSION = datasets.Version("1.0.0")
 
     BUILDER_CONFIGS = [
         RumourEval2019Config(name="RumourEval2019", version=VERSION, description="Stance Detection Dataset"),
@@ -88,10 +88,11 @@ class RumourEval2019(datasets.GeneratorBasedBuilder):
         train_text = dl_manager.download_and_extract("rumoureval2019_train.csv")
         validation_text = dl_manager.download_and_extract("rumoureval2019_val.csv")
         test_text = dl_manager.download_and_extract("rumoureval2019_test.csv")
+
         return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_text, "split": "train"}),
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_text, "split": "train"}),
             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": validation_text, "split": "validation"}),
-            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_text, "split": "test"}),
+            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_text, "split": "test"}),
         ]
 
     def _generate_examples(self, filepath, split):
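
The hunk ends at the _generate_examples signature; its body is untouched by this commit and not shown here. For orientation only, a generator over these CSVs could plausibly look like the sketch below, given the features declared in dataset_infos.json; the CSV column names are assumed, and this is not the script's actual implementation:

import csv

    # Sketch of a _generate_examples method on the RumourEval2019 builder above.
    def _generate_examples(self, filepath, split):
        """Yield (key, example) pairs from one rumoureval2019_*.csv file."""
        with open(filepath, encoding="utf-8") as f:
            reader = csv.DictReader(f)
            for key, row in enumerate(reader):
                yield key, {
                    "id": row["id"],                    # assumed CSV column names
                    "source_text": row["source_text"],
                    "reply_text": row["reply_text"],
                    "label": row["label"],              # ClassLabel accepts the string name
                }
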