updating for data streaming
AmericanStories.py (+17 -26)
@@ -1,11 +1,5 @@
 import json
-import tarfile
-from datasets import DatasetInfo, DatasetBuilder, DownloadManager, BuilderConfig, SplitGenerator, Split, Version
 import datasets
-import os
-import requests
-import re
-from tqdm import tqdm

 SUPPORTED_YEARS = ["1774"]
 # Add years from 1798 to 1964 to the supported years
@@ -19,10 +13,11 @@ def make_year_file_splits():
         dict: A dictionary mapping each year to its corresponding file URL.
         list: A list of years.
     """
+
     base_url = "https://huggingface.co/datasets/dell-research-harvard/AmericanStories/resolve/main/"

     # Make a list of years from 1774 to 1960
-    # TODO:
+    # TODO: can we set up to check the actual files in the repo instead of relying on the offline list?
     year_list = SUPPORTED_YEARS
     data_files = [f"faro_{year}.tar.gz" for year in year_list]
     url_list = [base_url + file for file in data_files]
@@ -47,7 +42,7 @@ _FILE_DICT, _YEARS = make_year_file_splits()
 class CustomBuilderConfig(datasets.BuilderConfig):
     """BuilderConfig for AmericanStories dataset with different configurations."""

-    def __init__(self, year_list=None,
+    def __init__(self, year_list=None, **kwargs):
         """
         BuilderConfig for AmericanStories dataset.

@@ -57,8 +52,6 @@ class CustomBuilderConfig(datasets.BuilderConfig):
         """
         super(CustomBuilderConfig, self).__init__(**kwargs)
         self.year_list = year_list
-        self.features = features
-

 class AmericanStories(datasets.GeneratorBasedBuilder):
     """Dataset builder class for AmericanStories dataset."""
@@ -76,7 +69,6 @@ class AmericanStories(datasets.GeneratorBasedBuilder):
             version=VERSION,
             description="Subset of years in the dataset",
             year_list=["1774", "1804"]
-
         ),
         CustomBuilderConfig(
             name="all_years_content_regions",
@@ -88,10 +80,11 @@ class AmericanStories(datasets.GeneratorBasedBuilder):
             version=VERSION,
             description="Subset of years in the dataset",
             year_list=["1774", "1804"],
-
         )
     ]
+
     DEFAULT_CONFIG_NAME = "subset_years"
+    BUILDER_CONFIG_CLASS = CustomBuilderConfig

     def _info(self):
         """
@@ -111,8 +104,6 @@ class AmericanStories(datasets.GeneratorBasedBuilder):
                     "headline": datasets.Value("string"),
                     "byline": datasets.Value("string"),
                     "article": datasets.Value("string"),
-
-
                 }
             )
         else:
@@ -150,24 +141,25 @@ class AmericanStories(datasets.GeneratorBasedBuilder):

         # Subset _FILE_DICT and year_list to only include years in config.year_list
         if self.config.year_list:
-            urls = {year: urls[year] for year in self.config.year_list}
+            urls = {year: urls[year] for year in self.config.year_list if year in SUPPORTED_YEARS}
             year_list = self.config.year_list

-
+        archive = dl_manager.download(urls)

         # Return a list of splits, where each split corresponds to a year
         return [
             datasets.SplitGenerator(
                 name=year,
                 gen_kwargs={
-                    "
+                    "files": dl_manager.iter_archive(archive[year]),
+                    "year_dir": "/".join(["mnt", "122a7683-fa4b-45dd-9f13-b18cc4f4a187", "ca_rule_based_fa_clean", "faro_" + year]),
                     "split": year,
                     "associated": True if not self.config.name.endswith("content_regions") else False,
                 },
             ) for year in year_list
         ]

-    def _generate_examples(self, year_dir,split, associated):
+    def _generate_examples(self, files, year_dir, split, associated):
         """
         Generates examples for the specified year and split.

@@ -180,10 +172,10 @@ class AmericanStories(datasets.GeneratorBasedBuilder):
         """
         print("Associated: " + str(associated))
         if associated:
-            for filepath in
-
+            for filepath, f in files:
+                if filepath.startswith(year_dir):
                     try :
-                        data = json.
+                        data = json.loads(f.read().decode('utf-8'))
                     except:
                         print("Error loading file: " + filepath)
                         continue
@@ -208,20 +200,19 @@ class AmericanStories(datasets.GeneratorBasedBuilder):
                     }
         else:
             print("Returning a json as a string, feel free to parse it yourself!")
-            for filepath in
-
+            for filepath, f in files:
+                if filepath.startswith(year_dir):
                     try :
-                        data = json.
+                        data = json.loads(f.read().decode('utf-8'))
                     except:
                         # print("Error loading file: " + filepath)
                         continue
                     ###Convert json to strng
                     data=json.dumps(data)
-                    print((data))
-                    print(type(data))
                     scan_id=filepath.split('.')[0]
                     ##Yield the scan id and the raw data string
                     yield scan_id, {
                         "raw_data_string": str(data)
                     }

+
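In short, the update drops the tarfile/requests extraction path: each faro_{year}.tar.gz is now fetched with dl_manager.download and its members are walked lazily via dl_manager.iter_archive, which yields (path, file-object) pairs. That is what makes the dataset readable in streaming mode, and it is also why _generate_examples now filters members with filepath.startswith(year_dir) instead of globbing an extracted directory.

A minimal usage sketch, not part of the commit: it assumes the updated script is served from the dell-research-harvard/AmericanStories repo, that the new BUILDER_CONFIG_CLASS lets load_dataset forward a year_list override to CustomBuilderConfig, and that fields such as "headline" exist for the "subset_years" config.

from datasets import load_dataset

# Streaming read: no local extraction, archive members are consumed on the fly
# via dl_manager.iter_archive inside the loading script.
# (Newer versions of datasets may additionally require trust_remote_code=True.)
ds = load_dataset(
    "dell-research-harvard/AmericanStories",   # repo assumed to host this script
    "subset_years",                             # config defined in BUILDER_CONFIGS above
    year_list=["1774"],                         # forwarded to CustomBuilderConfig (assumed override)
    streaming=True,
)

# Splits are named by year; iterate lazily over the first few records.
for i, example in enumerate(ds["1774"]):
    print(example.get("headline"))              # "headline"/"byline"/"article" per the features above
    if i == 2:
        break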