imbesat-rizvi committed on
Commit 194d2b4
1 Parent(s): 31cf874

[Bug Fix] Resolved load error due to difference in num_examples between configs
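The gist of the fix: documents that become empty once headers, footers, and quotes are stripped are now dropped before the stratified 80:20 train/validation split, so the number of examples the builder generates agrees with what dataset_infos.json records. The snippet below is a condensed sketch of that idea for the train subset only, not the builder code itself (the script filters the raw scikit-learn lists before building the DataFrame, as the newsgroups.py diff further down shows); it relies only on public scikit-learn and pandas APIs.

# Condensed sketch: drop empty documents *before* the stratified split.
import pandas as pd
from sklearn.datasets import fetch_20newsgroups
from sklearn.model_selection import train_test_split

train_data = fetch_20newsgroups(
    subset="train", random_state=42, remove=("headers", "footers", "quotes")
)
labels = [train_data.target_names[i] for i in train_data.target]
df = pd.DataFrame({"text": train_data.data, "labels": labels})

# Posts consisting only of metadata are empty after the removal above;
# dropping them here is what changes num_examples for this config.
df = df[df["text"].str.strip() != ""].reset_index(drop=True)
df["text"] = df["text"].str.replace(r"\s+", " ", regex=True)

train_df, val_df = train_test_split(
    df, test_size=0.2, random_state=42, stratify=df["labels"]
)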

Files changed (2)
  1. dataset_infos.json +1 -1
  2. newsgroups.py +72 -45
dataset_infos.json CHANGED
@@ -1 +1 @@
-{"with_metadata": {"description": "The bydate version of the 20-newsgroup dataset fetched from scikit_learn and\nsplit in stratified manner into train, validation and test sets. With and\nwithout metadata is made available as individual config names. The test set\nfrom the original 20 newsgroup dataset is retained while the original train\nset is split 80:20 into train and validation sets in stratified manner based\non the newsgroup. The 20 different newsgroup are provided as the labels\ninstead of config names as specified in the official huggingface dataset.\nNewsgroups are specified as labels to provide a simplified setup for text\nclassification task. The 20 different newsgroup functioning as labels are:\n(1) alt.atheism\n(2) comp.graphics\n(3) comp.os.ms-windows.misc\n(4) comp.sys.ibm.pc.hardware\n(5) comp.sys.mac.hardware\n(6) comp.windows.x\n(7) misc.forsale\n(8) rec.autos\n(9) rec.motorcycles\n(10) rec.sport.baseball\n(11) rec.sport.hockey\n(12) sci.crypt\n(13) sci.electronics\n(14) sci.med\n(15) sci.space\n(16) soc.religion.christian\n(17) talk.politics.guns\n(18) talk.politics.mideast\n(19) talk.politics.misc\n(20) talk.religion.misc", "citation": "\n@inproceedings{Lang95,\n author = {Ken Lang},\n title = {Newsweeder: Learning to filter netnews}\n year = {1995}\n booktitle = {Proceedings of the Twelfth International Conference on Machine Learning}\n pages = {331-339}\n }\n ", "homepage": "http://qwone.com/~jason/20Newsgroups/", "license": "", "features": {"text": {"dtype": "large_string", "id": null, "_type": "Value"}, "labels": {"num_classes": 20, "names": ["alt.atheism", "comp.graphics", "comp.os.ms-windows.misc", "comp.sys.ibm.pc.hardware", "comp.sys.mac.hardware", "comp.windows.x", "misc.forsale", "rec.autos", "rec.motorcycles", "rec.sport.baseball", "rec.sport.hockey", "sci.crypt", "sci.electronics", "sci.med", "sci.space", "soc.religion.christian", "talk.politics.guns", "talk.politics.mideast", "talk.politics.misc", "talk.religion.misc"], "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "newsgroups", "config_name": "with_metadata", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 17065029, "num_examples": 9051, "dataset_name": "newsgroups"}, "validation": {"name": "validation", "num_bytes": 4279761, "num_examples": 2263, "dataset_name": "newsgroups"}, "test": {"name": "test", "num_bytes": 13328728, "num_examples": 7532, "dataset_name": "newsgroups"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 34673518, "size_in_bytes": 34673518}, "without_metadata": {"description": "The bydate version of the 20-newsgroup dataset fetched from scikit_learn and\nsplit in stratified manner into train, validation and test sets. With and\nwithout metadata is made available as individual config names. The test set\nfrom the original 20 newsgroup dataset is retained while the original train\nset is split 80:20 into train and validation sets in stratified manner based\non the newsgroup. The 20 different newsgroup are provided as the labels\ninstead of config names as specified in the official huggingface dataset.\nNewsgroups are specified as labels to provide a simplified setup for text\nclassification task. The 20 different newsgroup functioning as labels are:\n(1) alt.atheism\n(2) comp.graphics\n(3) comp.os.ms-windows.misc\n(4) comp.sys.ibm.pc.hardware\n(5) comp.sys.mac.hardware\n(6) comp.windows.x\n(7) misc.forsale\n(8) rec.autos\n(9) rec.motorcycles\n(10) rec.sport.baseball\n(11) rec.sport.hockey\n(12) sci.crypt\n(13) sci.electronics\n(14) sci.med\n(15) sci.space\n(16) soc.religion.christian\n(17) talk.politics.guns\n(18) talk.politics.mideast\n(19) talk.politics.misc\n(20) talk.religion.misc", "citation": "\n@inproceedings{Lang95,\n author = {Ken Lang},\n title = {Newsweeder: Learning to filter netnews}\n year = {1995}\n booktitle = {Proceedings of the Twelfth International Conference on Machine Learning}\n pages = {331-339}\n }\n ", "homepage": "http://qwone.com/~jason/20Newsgroups/", "license": "", "features": {"text": {"dtype": "large_string", "id": null, "_type": "Value"}, "labels": {"num_classes": 20, "names": ["alt.atheism", "comp.graphics", "comp.os.ms-windows.misc", "comp.sys.ibm.pc.hardware", "comp.sys.mac.hardware", "comp.windows.x", "misc.forsale", "rec.autos", "rec.motorcycles", "rec.sport.baseball", "rec.sport.hockey", "sci.crypt", "sci.electronics", "sci.med", "sci.space", "soc.religion.christian", "talk.politics.guns", "talk.politics.mideast", "talk.politics.misc", "talk.religion.misc"], "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "newsgroups", "config_name": "without_metadata", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 10649695, "num_examples": 9051, "dataset_name": "newsgroups"}, "validation": {"name": "validation", "num_bytes": 2755338, "num_examples": 2263, "dataset_name": "newsgroups"}, "test": {"name": "test", "num_bytes": 8011416, "num_examples": 7532, "dataset_name": "newsgroups"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 21416449, "size_in_bytes": 21416449}}
+{"with_metadata": {"description": "The bydate version of the 20-newsgroup dataset fetched from scikit_learn and\nsplit in stratified manner into train, validation and test sets. With and\nwithout metadata is made available as individual config names. The test set\nfrom the original 20 newsgroup dataset is retained while the original train\nset is split 80:20 into train and validation sets in stratified manner based\non the newsgroup. The 20 different newsgroup are provided as the labels\ninstead of config names as specified in the official huggingface dataset.\nNewsgroups are specified as labels to provide a simplified setup for text\nclassification task. The 20 different newsgroup functioning as labels are:\n(1) alt.atheism\n(2) comp.graphics\n(3) comp.os.ms-windows.misc\n(4) comp.sys.ibm.pc.hardware\n(5) comp.sys.mac.hardware\n(6) comp.windows.x\n(7) misc.forsale\n(8) rec.autos\n(9) rec.motorcycles\n(10) rec.sport.baseball\n(11) rec.sport.hockey\n(12) sci.crypt\n(13) sci.electronics\n(14) sci.med\n(15) sci.space\n(16) soc.religion.christian\n(17) talk.politics.guns\n(18) talk.politics.mideast\n(19) talk.politics.misc\n(20) talk.religion.misc", "citation": "\n@inproceedings{Lang95,\n author = {Ken Lang},\n title = {Newsweeder: Learning to filter netnews}\n year = {1995}\n booktitle = {Proceedings of the Twelfth International Conference on Machine Learning}\n pages = {331-339}\n }\n ", "homepage": "http://qwone.com/~jason/20Newsgroups/", "license": "", "features": {"text": {"dtype": "large_string", "id": null, "_type": "Value"}, "labels": {"num_classes": 20, "names": ["alt.atheism", "comp.graphics", "comp.os.ms-windows.misc", "comp.sys.ibm.pc.hardware", "comp.sys.mac.hardware", "comp.windows.x", "misc.forsale", "rec.autos", "rec.motorcycles", "rec.sport.baseball", "rec.sport.hockey", "sci.crypt", "sci.electronics", "sci.med", "sci.space", "soc.religion.christian", "talk.politics.guns", "talk.politics.mideast", "talk.politics.misc", "talk.religion.misc"], "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "newsgroups", "config_name": "with_metadata", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 17065029, "num_examples": 9051, "dataset_name": "newsgroups"}, "validation": {"name": "validation", "num_bytes": 4279761, "num_examples": 2263, "dataset_name": "newsgroups"}, "test": {"name": "test", "num_bytes": 13328728, "num_examples": 7532, "dataset_name": "newsgroups"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 34673518, "size_in_bytes": 34673518}, "without_metadata": {"description": "The bydate version of the 20-newsgroup dataset fetched from scikit_learn and\nsplit in stratified manner into train, validation and test sets. With and\nwithout metadata is made available as individual config names. The test set\nfrom the original 20 newsgroup dataset is retained while the original train\nset is split 80:20 into train and validation sets in stratified manner based\non the newsgroup. The 20 different newsgroup are provided as the labels\ninstead of config names as specified in the official huggingface dataset.\nNewsgroups are specified as labels to provide a simplified setup for text\nclassification task. The 20 different newsgroup functioning as labels are:\n(1) alt.atheism\n(2) comp.graphics\n(3) comp.os.ms-windows.misc\n(4) comp.sys.ibm.pc.hardware\n(5) comp.sys.mac.hardware\n(6) comp.windows.x\n(7) misc.forsale\n(8) rec.autos\n(9) rec.motorcycles\n(10) rec.sport.baseball\n(11) rec.sport.hockey\n(12) sci.crypt\n(13) sci.electronics\n(14) sci.med\n(15) sci.space\n(16) soc.religion.christian\n(17) talk.politics.guns\n(18) talk.politics.mideast\n(19) talk.politics.misc\n(20) talk.religion.misc", "citation": "\n@inproceedings{Lang95,\n author = {Ken Lang},\n title = {Newsweeder: Learning to filter netnews}\n year = {1995}\n booktitle = {Proceedings of the Twelfth International Conference on Machine Learning}\n pages = {331-339}\n }\n ", "homepage": "http://qwone.com/~jason/20Newsgroups/", "license": "", "features": {"text": {"dtype": "large_string", "id": null, "_type": "Value"}, "labels": {"num_classes": 20, "names": ["alt.atheism", "comp.graphics", "comp.os.ms-windows.misc", "comp.sys.ibm.pc.hardware", "comp.sys.mac.hardware", "comp.windows.x", "misc.forsale", "rec.autos", "rec.motorcycles", "rec.sport.baseball", "rec.sport.hockey", "sci.crypt", "sci.electronics", "sci.med", "sci.space", "soc.religion.christian", "talk.politics.guns", "talk.politics.mideast", "talk.politics.misc", "talk.religion.misc"], "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "newsgroups", "config_name": "without_metadata", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 10852391, "num_examples": 8811, "dataset_name": "newsgroups"}, "validation": {"name": "validation", "num_bytes": 2547760, "num_examples": 2203, "dataset_name": "newsgroups"}, "test": {"name": "test", "num_bytes": 8007923, "num_examples": 7317, "dataset_name": "newsgroups"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 21408074, "size_in_bytes": 21408074}}
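The only change in this file is the recorded split sizes for the without_metadata config: train 9051 → 8811, validation 2263 → 2203, and test 7532 → 7317 (the with_metadata entry is untouched); these are the counts left once empty documents are dropped. The datasets library verifies the splits a builder generates against the num_examples recorded here, and a mismatch fails the load (typically as a NonMatchingSplitsSizesError), which is the error this commit resolves. A quick post-fix check might look like the sketch below; the Hub repository id is assumed, and recent datasets releases may additionally require trust_remote_code=True for script-based datasets.

from datasets import load_dataset

# "imbesat-rizvi/newsgroups" is an assumed repository id; substitute the real one.
ds = load_dataset("imbesat-rizvi/newsgroups", "without_metadata")

# With the corrected metadata, split verification passes and the sizes match
# dataset_infos.json: 8811 / 2203 / 7317.
print({split: ds[split].num_rows for split in ds})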
newsgroups.py CHANGED
@@ -5,29 +5,30 @@ from sklearn.model_selection import train_test_split
 import pandas as pd
 
 _NEWSGROUPS = [
-    'alt.atheism',
-    'comp.graphics',
-    'comp.os.ms-windows.misc',
-    'comp.sys.ibm.pc.hardware',
-    'comp.sys.mac.hardware',
-    'comp.windows.x',
-    'misc.forsale',
-    'rec.autos',
-    'rec.motorcycles',
-    'rec.sport.baseball',
-    'rec.sport.hockey',
-    'sci.crypt',
-    'sci.electronics',
-    'sci.med',
-    'sci.space',
-    'soc.religion.christian',
-    'talk.politics.guns',
-    'talk.politics.mideast',
-    'talk.politics.misc',
-    'talk.religion.misc',
-    ]
-
-_DESCRIPTION = textwrap.dedent("""\
+    "alt.atheism",
+    "comp.graphics",
+    "comp.os.ms-windows.misc",
+    "comp.sys.ibm.pc.hardware",
+    "comp.sys.mac.hardware",
+    "comp.windows.x",
+    "misc.forsale",
+    "rec.autos",
+    "rec.motorcycles",
+    "rec.sport.baseball",
+    "rec.sport.hockey",
+    "sci.crypt",
+    "sci.electronics",
+    "sci.med",
+    "sci.space",
+    "soc.religion.christian",
+    "talk.politics.guns",
+    "talk.politics.mideast",
+    "talk.politics.misc",
+    "talk.religion.misc",
+]
+
+_DESCRIPTION = textwrap.dedent(
+    """\
 The bydate version of the 20-newsgroup dataset fetched from scikit_learn and
 split in stratified manner into train, validation and test sets. With and
 without metadata is made available as individual config names. The test set
@@ -39,7 +40,7 @@ _DESCRIPTION = textwrap.dedent("""\
 classification task. The 20 different newsgroup functioning as labels are:
 """
 )
-_DESCRIPTION += "\n".join(f"({i+1}) {j}" for i,j in enumerate(_NEWSGROUPS))
+_DESCRIPTION += "\n".join(f"({i+1}) {j}" for i, j in enumerate(_NEWSGROUPS))
 
 _HOMEPAGE = "http://qwone.com/~jason/20Newsgroups/"
 
@@ -55,18 +56,19 @@ _CITATION = """
 
 _VERSION = datasets.utils.Version("2.0.0")
 
-class NewsgroupsConfig(datasets.BuilderConfig):
 
+class NewsgroupsConfig(datasets.BuilderConfig):
     def __init__(self, **kwargs):
         super(NewsgroupsConfig, self).__init__(version=_VERSION, **kwargs)
 
-
+
 class Newsgroups(datasets.GeneratorBasedBuilder):
 
     BUILDER_CONFIGS = [
        NewsgroupsConfig(
            name="with_metadata",
-            description=textwrap.dedent("""\
+            description=textwrap.dedent(
+                """\
 The original complete bydate 20-Newsgroups dataset with the headers,
 footers, and quotes metadata as intact and just the continuous
 whitespaces (including new-line) replaced by single whitespace
@@ -75,7 +77,8 @@ class Newsgroups(datasets.GeneratorBasedBuilder):
         ),
         NewsgroupsConfig(
             name="without_metadata",
-            description=textwrap.dedent("""\
+            description=textwrap.dedent(
+                """\
 The bydate 20-Newsgroups dataset without the headers, footers,
 and quotes metadata as well as the continuous whitespaces
 (including new-line) replaced by single whitespace characters."""
@@ -100,30 +103,55 @@ class Newsgroups(datasets.GeneratorBasedBuilder):
             citation=_CITATION,
         )
 
-
     def _split_generators(self, dl_manager):
-
+
         if self.config.name == "with_metadata":
             train_data = fetch_20newsgroups(subset="train", random_state=42)
             test_data = fetch_20newsgroups(subset="test", random_state=42)
 
         else:
-            train_data = fetch_20newsgroups(subset="train", random_state=42, remove=("headers", "footers", "quotes"))
-            test_data = fetch_20newsgroups(subset="test", random_state=42, remove=("headers", "footers", "quotes"))
-
-        train_labels = [train_data["target_names"][i] for i in train_data["target"]]
-        test_labels = [test_data["target_names"][i] for i in test_data["target"]]
-
-        train_df = pd.DataFrame({"text": train_data["data"], "labels": train_labels})
-        test_df = pd.DataFrame({"text": test_data["data"], "labels": test_labels})
-
+            train_data = fetch_20newsgroups(
+                subset="train", random_state=42, remove=("headers", "footers", "quotes")
+            )
+            test_data = fetch_20newsgroups(
+                subset="test", random_state=42, remove=("headers", "footers", "quotes")
+            )
+
+        empty_data_idcs = set(
+            [i for i, j in enumerate(train_data.data) if j.strip() == ""]
+        )
+        train_data.data = [
+            j for i, j in enumerate(train_data.data) if i not in empty_data_idcs
+        ]
+        train_data.target = [
+            j for i, j in enumerate(train_data.target) if i not in empty_data_idcs
+        ]
+
+        empty_data_idcs = set(
+            [i for i, j in enumerate(test_data.data) if j.strip() == ""]
+        )
+        test_data.data = [
+            j for i, j in enumerate(test_data.data) if i not in empty_data_idcs
+        ]
+        test_data.target = [
+            j for i, j in enumerate(test_data.target) if i not in empty_data_idcs
+        ]
+
+        train_labels = [train_data.target_names[i] for i in train_data.target]
+        test_labels = [test_data.target_names[i] for i in test_data.target]
+
+        train_df = pd.DataFrame({"text": train_data.data, "labels": train_labels})
+        test_df = pd.DataFrame({"text": test_data.data, "labels": test_labels})
+
         train_df["text"] = train_df["text"].str.replace("\s+", " ", regex=True)
         test_df["text"] = test_df["text"].str.replace("\s+", " ", regex=True)
-
-        train_df = train_df[train_df["text"].str.strip()!=""]
-        test_df = test_df[test_df["text"].str.strip()!=""]
 
-        train_df, val_df = train_test_split(train_df, test_size=0.2, random_state=42, stratify=train_df["labels"])
+        # train_df = train_df[train_df["text"].str.strip()!=""]
+        # test_df = test_df[test_df["text"].str.strip()!=""]
+
+        train_df, val_df = train_test_split(
+            train_df, test_size=0.2, random_state=42, stratify=train_df["labels"]
+        )
         train_df = train_df.reset_index(drop=True)
         val_df = val_df.reset_index(drop=True)
 
@@ -139,7 +167,6 @@ class Newsgroups(datasets.GeneratorBasedBuilder):
             ),
         ]
 
-
    def _generate_examples(self, df):
        for idx, row in df.iterrows():
-            yield idx, row.to_dict()
+            yield idx, row.to_dict()
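For completeness, a small usage sketch of the loaded dataset: per dataset_infos.json the labels feature is a 20-class ClassLabel, so each example carries an integer class id that can be decoded back to its newsgroup name (the repository id is again an assumption).

from datasets import load_dataset

ds = load_dataset("imbesat-rizvi/newsgroups", "without_metadata")  # assumed repo id
example = ds["train"][0]
# ClassLabel.int2str maps the stored class id back to the newsgroup name.
print(ds["train"].features["labels"].int2str(example["labels"]))
print(example["text"][:100])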