blnewman-uw committed
Commit 6d9f8d5 • 1 Parent(s): 52e72e5

Switch to downloading remote files

Files changed:
- README.md +2 -2
- arxivDIGESTables.py +5 -12
README.md CHANGED

```diff
@@ -13,6 +13,6 @@ configs:
 
 # ArXivDIGESTables
 
-
+**Note**: Please use huggingface datasets only as a way to preview the data, as `load_dataset` does not work well with the current data format. Instead, it is recommended to directly download `tables.jsonl`, `papers.jsonl`, and `full_texts.jsonl.gz`.
 
-
+See [the github repo](https://github.com/bnewm0609/arxivDIGESTables) for more details.
```
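For readers following that recommendation, here is a minimal sketch of the direct-download path using `huggingface_hub`. The repo id is taken from the `_URLS` entries in the loading script below; `papers.jsonl` and `full_texts.jsonl.gz` are the filenames named in the note and are assumed to sit at the repo root alongside `tables.jsonl`:

```python
# Sketch: download the raw JSONL files directly instead of calling
# load_dataset, as the README note recommends.
import gzip
import json

from huggingface_hub import hf_hub_download

REPO_ID = "blnewman/arxivDIGESTables"


def read_jsonl(path, open_fn=open):
    # Each line of a .jsonl file is one standalone JSON record.
    with open_fn(path, "rt") as f:
        return [json.loads(line) for line in f]


tables = read_jsonl(hf_hub_download(REPO_ID, "tables.jsonl", repo_type="dataset"))
papers = read_jsonl(hf_hub_download(REPO_ID, "papers.jsonl", repo_type="dataset"))
full_texts = read_jsonl(
    hf_hub_download(REPO_ID, "full_texts.jsonl.gz", repo_type="dataset"),
    open_fn=gzip.open,  # this file is gzip-compressed JSONL
)
```

`hf_hub_download` caches each file locally and returns its path, so repeated runs do not re-download.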
arxivDIGESTables.py CHANGED

```diff
@@ -26,12 +26,7 @@ import datasets
 # TODO: Add BibTeX citation
 # Find for instance the citation on arxiv or on the dataset repo/website
 _CITATION = """\
-@InProceedings{huggingface:dataset,
-title = {A great new dataset},
-author={huggingface, Inc.
-},
-year={2020}
-}
+TBD
 """
 
 # TODO: Add description of the dataset here
@@ -49,8 +44,8 @@ _LICENSE = ""
 # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
 _URLS = {
-    "high_quality": "
-    "medium_quality": "
+    "high_quality": "https://huggingface.co/datasets/blnewman/arxivDIGESTables/resolve/main/tables.jsonl",
+    "medium_quality": "https://huggingface.co/datasets/blnewman/arxivDIGESTables/resolve/main/tables_medium_quality.jsonl",
 }
 
 
@@ -151,7 +146,6 @@ class ArxivDIGESTables(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-        print("hi", flush=True)
         # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
 
@@ -159,9 +153,8 @@ class ArxivDIGESTables(datasets.GeneratorBasedBuilder):
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
         urls = _URLS[self.config.name]
-
-
-        data_path = urls
+        data_path = dl_manager.download_and_extract(urls)
+        # data_path = urls
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
```
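The substance of the commit is the last hunk: `data_path = urls`, which handed raw URL strings to the rest of the script, becomes `data_path = dl_manager.download_and_extract(urls)`, so the remote files are actually fetched; the old assignment is retained as a comment. As a minimal sketch of what that call does, run with a standalone `DownloadManager` outside the builder class (the record inspection at the end is illustrative and not part of the script):

```python
# Sketch: reproduce the effect of the new _split_generators line with a
# standalone DownloadManager. download_and_extract() caches the remote file
# locally and returns the cache path; a plain .jsonl is not an archive, so
# nothing is actually extracted.
import json

from datasets import DownloadManager

_URLS = {
    "high_quality": "https://huggingface.co/datasets/blnewman/arxivDIGESTables/resolve/main/tables.jsonl",
    "medium_quality": "https://huggingface.co/datasets/blnewman/arxivDIGESTables/resolve/main/tables_medium_quality.jsonl",
}

dl_manager = DownloadManager()
data_path = dl_manager.download_and_extract(_URLS["high_quality"])

# data_path is now a local path in the HF cache, ready to be handed to
# _generate_examples via gen_kwargs.
with open(data_path) as f:
    first_record = json.loads(f.readline())
print(sorted(first_record))  # field names of one table record
```

Because each config maps to a single URL string, `download_and_extract` returns a single local path here; had `_URLS[self.config.name]` been a dict or list, it would return the same structure with every URL replaced by a cached path.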