Tasks: Text Classification
Sub-tasks: multi-class-classification
Languages: English
Size: 10K<n<100K
License:
Update files from the datasets library (from 1.16.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0

Files changed:
- README.md (+1, -0)
- newsgroup.py (+21, -15)
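This update converts the loading script to read examples directly out of the downloaded archive instead of extracting it first, which also makes the dataset usable in streaming mode. A minimal usage sketch, assuming datasets>=1.16.0 is installed; "bydate_sci.space" stands in for any of the dataset's config names:

    from datasets import load_dataset

    # Streaming mode never extracts the archive to disk; examples are
    # read straight out of the tar file as they are requested.
    ds = load_dataset("newsgroup", "bydate_sci.space", split="train", streaming=True)
    print(next(iter(ds))["text"][:200])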
README.md
CHANGED
@@ -1,4 +1,5 @@
 ---
+pretty_name: 20 Newsgroups
 languages:
 - en
 paperswithcode_id: null
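The new pretty_name field sets the display title shown on the dataset's Hub page. After this change, the card's YAML front matter begins:

    ---
    pretty_name: 20 Newsgroups
    languages:
    - en
    paperswithcode_id: null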
newsgroup.py
CHANGED
@@ -17,8 +17,6 @@
 """20Newsgroup dataset"""
 
 
-import os
-
 import datasets
 
 
@@ -121,41 +119,49 @@ class Newsgroups(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         url = _DOWNLOAD_URL[self.config.name.split("_")[0]]
-        path = dl_manager.download_and_extract(url)
+        archive = dl_manager.download(url)
         if self.config.name.startswith("bydate"):
 
             return [
                 datasets.SplitGenerator(
                     name=datasets.Split.TRAIN,
-                    gen_kwargs={"files_path": os.path.join(path, "20news-bydate-train", self.config.sub_dir)},
+                    gen_kwargs={
+                        "files_dir": "20news-bydate-train/" + self.config.sub_dir,
+                        "files": dl_manager.iter_archive(archive),
+                    },
                 ),
                 datasets.SplitGenerator(
                     name=datasets.Split.TEST,
-                    gen_kwargs={"files_path": os.path.join(path, "20news-bydate-test", self.config.sub_dir)},
+                    gen_kwargs={
+                        "files_dir": "20news-bydate-test/" + self.config.sub_dir,
+                        "files": dl_manager.iter_archive(archive),
+                    },
                 ),
             ]
         elif self.config.name.startswith("19997"):
             return [
                 datasets.SplitGenerator(
                     name=datasets.Split.TRAIN,
-                    gen_kwargs={"files_path": os.path.join(path, "20_newsgroups", self.config.sub_dir)},
+                    gen_kwargs={
+                        "files_dir": "20_newsgroups/" + self.config.sub_dir,
+                        "files": dl_manager.iter_archive(archive),
+                    },
                 )
             ]
         else:
             return [
                 datasets.SplitGenerator(
                     name=datasets.Split.TRAIN,
-                    gen_kwargs={"files_path": os.path.join(path, "20news-18828", self.config.sub_dir)},
+                    gen_kwargs={
+                        "files_dir": "20news-18828/" + self.config.sub_dir,
+                        "files": dl_manager.iter_archive(archive),
+                    },
                )
             ]
 
-    def _generate_examples(self, files_path):
+    def _generate_examples(self, files_dir, files):
         """Yields examples."""
-        files = sorted(os.listdir(files_path))
-        for id_, file in enumerate(files):
-            filepath = os.path.join(files_path, file)
-            with open(
-                filepath, encoding="utf8", errors="ignore"
-            ) as f:  # here we can ignore byte encoded tokens. we only have a very few and in most case it happens at the end of the file (kind of \FF)
-                text = f.read()
+        for id_, (path, f) in enumerate(files):
+            if path.startswith(files_dir):
+                text = f.read().decode("utf-8", errors="ignore")
                 yield id_, {"text": text}
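The rewritten _generate_examples works because dl_manager.iter_archive yields (path, file-object) pairs for every member of the downloaded tar archive, with paths relative to the archive root; filtering on the files_dir prefix selects one split and newsgroup without ever extracting the archive. A rough standalone sketch of the same pattern using only the standard library (tarfile stands in for iter_archive here, and the archive file name is illustrative):

    import tarfile

    def iter_archive(archive_path):
        """Rough stand-in for datasets' dl_manager.iter_archive: yields
        (path_inside_archive, file_object) for each regular file member."""
        with tarfile.open(archive_path) as tar:
            for member in tar.getmembers():
                if member.isfile():
                    yield member.name, tar.extractfile(member)

    def generate_examples(archive_path, files_dir):
        # Same shape as the updated _generate_examples: enumerate every
        # member, keep only those under the split/newsgroup prefix.
        for id_, (path, f) in enumerate(iter_archive(archive_path)):
            if path.startswith(files_dir):
                text = f.read().decode("utf-8", errors="ignore")
                yield id_, {"text": text}

    # For example, all training documents of one newsgroup:
    # for id_, example in generate_examples("20news-bydate.tar.gz",
    #                                       "20news-bydate-train/sci.space"):
    #     print(id_, example["text"][:80])

Note that, as in the real script, id_ counts every archive member rather than only the matching ones, so the yielded keys are unique but not contiguous; unique keys are all the datasets library requires.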