Gabriel Kressin Palacios committed
Commit: f3eff50
1 Parent(s): 183c5df

dl_manager fix, checksums update

Files changed:
- README.md +4 -4
- dataset_infos.json +1 -1
- wikitext_linked.py +7 -7
README.md CHANGED

@@ -95,8 +95,8 @@ English.
 
 #### wikitext2
 
-- **Size of downloaded dataset files:**
-- **Size of the generated dataset:**
+- **Size of downloaded dataset files:** 27.3 MB
+- **Size of the generated dataset:** 197.2 MB
 - **Total amount of disk used:** 197.2 MB
 An example of 'validation' looks as follows.
 
@@ -120,8 +120,8 @@ An example of 'validation' looks as follows.
 
 #### wikitext103
 
-- **Size of downloaded dataset files:**
-- **Size of the generated dataset:**
+- **Size of downloaded dataset files:** 1.11 GB
+- **Size of the generated dataset:** 7.82 GB
 - **Total amount of disk used:** 7.82 GB
 An example of 'train' looks as follows.
 ```
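
The sizes filled in above are the download and on-disk figures the `datasets` library reports once a config is built. A minimal loading sketch (assuming only that the `datasets` package is installed and the Hub is reachable):

```python
from datasets import load_dataset

# "wikitext2" is the small config; "wikitext103" is the large one.
ds = load_dataset("gabrielkp/wikitext_linked", "wikitext2")

# Splits mirror the original WikiText: train / validation / test.
print(ds["validation"][0]["text"])
```
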
dataset_infos.json CHANGED

@@ -1 +1 @@
-{"wikitext2": {"description": " The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified\n Good and Featured articles on Wikipedia. Dependency Relations, POS, NER tags are marked with trankit and\n entities are linked with entity-fishing.\n The dataset is available under the Creative Commons Attribution-ShareAlike License.\n", "citation": "@misc{merity2016pointer,\n title={Pointer Sentinel Mixture Models},\n author={Stephen Merity and Caiming Xiong and James Bradbury and Richard Socher},\n year={2016},\n eprint={1609.07843},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n\n@inproceedings{nguyen2021trankit,\n title={Trankit: A Light-Weight Transformer-based Toolkit for Multilingual Natural Language Processing},\n author={Nguyen, Minh Van and Lai, Viet Dac and Veyseh, Amir Pouran Ben and Nguyen, Thien Huu},\n booktitle=\"Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: System Demonstrations\",\n year={2021}\n}\n\n@misc{entity-fishing,\n title = {entity-fishing},\n howpublished = {\\url{https://github.com/kermitt2/entity-fishing}},\n publisher = {GitHub},\n year = {2016--2022},\n archivePrefix = {swh},\n eprint = {1:dir:cb0ba3379413db12b0018b7c3af8d0d2d864139c}\n}\n", "homepage": "", "license": "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "original_id": {"dtype": "int64", "id": null, "_type": "Value"}, "tok_span": {"feature": {"feature": {"dtype": "int64", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "length": -1, "id": null, "_type": "Sequence"}, "tok_upos": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "tok_xpos": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "tok_dephead": {"feature": {"dtype": "int64", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "tok_deprel": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "tok_lemma": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "tok_ner": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ent_span": {"feature": {"feature": {"dtype": "int64", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "length": -1, "id": null, "_type": "Sequence"}, "ent_wikipedia_external_ref": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ent_ner": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ent_domains": {"feature": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "wikitext_linked", "config_name": "wikitext2", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 168970698, "num_examples": 82649, "dataset_name": "wikitext_linked"}, "validation": {"name": "validation", "num_bytes": 17702375, "num_examples": 8606, "dataset_name": "wikitext_linked"}, "test": {"name": "test", "num_bytes": 20013719, "num_examples": 10062, "dataset_name": "wikitext_linked"}}, "download_checksums": {}, "download_size":
+{"wikitext2": {"description": " The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified\n Good and Featured articles on Wikipedia. Dependency Relations, POS, NER tags are marked with trankit and\n entities are linked with entity-fishing.\n The dataset is available under the Creative Commons Attribution-ShareAlike License.\n", "citation": "@misc{merity2016pointer,\n title={Pointer Sentinel Mixture Models},\n author={Stephen Merity and Caiming Xiong and James Bradbury and Richard Socher},\n year={2016},\n eprint={1609.07843},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n\n@inproceedings{nguyen2021trankit,\n title={Trankit: A Light-Weight Transformer-based Toolkit for Multilingual Natural Language Processing},\n author={Nguyen, Minh Van and Lai, Viet Dac and Veyseh, Amir Pouran Ben and Nguyen, Thien Huu},\n booktitle=\"Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: System Demonstrations\",\n year={2021}\n}\n\n@misc{entity-fishing,\n title = {entity-fishing},\n howpublished = {\\url{https://github.com/kermitt2/entity-fishing}},\n publisher = {GitHub},\n year = {2016--2022},\n archivePrefix = {swh},\n eprint = {1:dir:cb0ba3379413db12b0018b7c3af8d0d2d864139c}\n}\n", "homepage": "https://github.com/GabrielKP/svo/", "license": "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "original_id": {"dtype": "int64", "id": null, "_type": "Value"}, "tok_span": {"feature": {"feature": {"dtype": "int64", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "length": -1, "id": null, "_type": "Sequence"}, "tok_upos": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "tok_xpos": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "tok_dephead": {"feature": {"dtype": "int64", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "tok_deprel": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "tok_lemma": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "tok_ner": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ent_span": {"feature": {"feature": {"dtype": "int64", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "length": -1, "id": null, "_type": "Sequence"}, "ent_wikipedia_external_ref": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ent_ner": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ent_domains": {"feature": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "wikitext_linked", "config_name": "wikitext2", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 168970698, "num_examples": 82649, "dataset_name": "wikitext_linked"}, "validation": {"name": "validation", "num_bytes": 17702375, "num_examples": 8606, "dataset_name": "wikitext_linked"}, "test": {"name": "test", "num_bytes": 20013719, "num_examples": 10062, "dataset_name": "wikitext_linked"}}, "download_checksums": {"https://huggingface.co/datasets/gabrielkp/wikitext_linked/resolve/main/wikitext2.zip": {"num_bytes": 27293137, "checksum": "9887782ff6dad83530d7bb4b4d4b120f4c9c08749ecc97800fa38772f2748b2f"}}, "download_size": 27293137, "post_processing_size": null, "dataset_size": 206686792, "size_in_bytes": 233979929}, "wikitext103": {"description": " The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified\n Good and Featured articles on Wikipedia. Dependency Relations, POS, NER tags are marked with trankit and\n entities are linked with entity-fishing.\n The dataset is available under the Creative Commons Attribution-ShareAlike License.\n", "citation": "@misc{merity2016pointer,\n title={Pointer Sentinel Mixture Models},\n author={Stephen Merity and Caiming Xiong and James Bradbury and Richard Socher},\n year={2016},\n eprint={1609.07843},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n\n@inproceedings{nguyen2021trankit,\n title={Trankit: A Light-Weight Transformer-based Toolkit for Multilingual Natural Language Processing},\n author={Nguyen, Minh Van and Lai, Viet Dac and Veyseh, Amir Pouran Ben and Nguyen, Thien Huu},\n booktitle=\"Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: System Demonstrations\",\n year={2021}\n}\n\n@misc{entity-fishing,\n title = {entity-fishing},\n howpublished = {\\url{https://github.com/kermitt2/entity-fishing}},\n publisher = {GitHub},\n year = {2016--2022},\n archivePrefix = {swh},\n eprint = {1:dir:cb0ba3379413db12b0018b7c3af8d0d2d864139c}\n}\n", "homepage": "https://github.com/GabrielKP/svo/", "license": "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "original_id": {"dtype": "int64", "id": null, "_type": "Value"}, "tok_span": {"feature": {"feature": {"dtype": "int64", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "length": -1, "id": null, "_type": "Sequence"}, "tok_upos": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "tok_xpos": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "tok_dephead": {"feature": {"dtype": "int64", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "tok_deprel": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "tok_lemma": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "tok_ner": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ent_span": {"feature": {"feature": {"dtype": "int64", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "length": -1, "id": null, "_type": "Sequence"}, "ent_wikipedia_external_ref": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ent_ner": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ent_domains": {"feature": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "wikitext_linked", "config_name": "wikitext103", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 8356828955, "num_examples": 4076530, "dataset_name": "wikitext_linked"}, "validation": {"name": "validation", "num_bytes": 17702461, "num_examples": 8607, "dataset_name": "wikitext_linked"}, "test": {"name": "test", "num_bytes": 20013609, "num_examples": 10062, "dataset_name": "wikitext_linked"}}, "download_checksums": {"https://huggingface.co/datasets/gabrielkp/wikitext_linked/resolve/main/wikitext103.zip": {"num_bytes": 1106715044, "checksum": "1f612dc1711160c9c65ee7d4fabd51bfce94ab972761195b4641a466d31e2e92"}}, "download_size": 1106715044, "post_processing_size": null, "dataset_size": 8394545025, "size_in_bytes": 9501260069}}
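
The updated entry pins each archive to a byte count and sha256 digest, which `datasets` uses to verify the download before extraction. The digest can be reproduced by hand; a minimal sketch using only the standard library, with the URL and expected digest copied from the wikitext2 entry above:

```python
import hashlib
import urllib.request

URL = "https://huggingface.co/datasets/gabrielkp/wikitext_linked/resolve/main/wikitext2.zip"
EXPECTED = "9887782ff6dad83530d7bb4b4d4b120f4c9c08749ecc97800fa38772f2748b2f"

digest = hashlib.sha256()
with urllib.request.urlopen(URL) as response:
    # Stream in 1 MiB chunks so the archive never has to fit in memory whole.
    for chunk in iter(lambda: response.read(1 << 20), b""):
        digest.update(chunk)

assert digest.hexdigest() == EXPECTED, "checksum mismatch"
```
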
wikitext_linked.py CHANGED

@@ -82,7 +82,7 @@ FEATURES = datasets.Features(
     }
 )
 
-_URL = "https://huggingface.co/datasets/gabrielkp/wikitext_linked/
+_URL = "https://huggingface.co/datasets/gabrielkp/wikitext_linked/resolve/main/"
 
 
 class WikitextLinked(datasets.ArrowBasedBuilder):
@@ -98,13 +98,13 @@ class WikitextLinked(datasets.ArrowBasedBuilder):
             name="wikitext2",
             version=VERSION,
             description="The small version",
-            data_dir="wikitext2
+            data_dir="wikitext2",
         ),
         datasets.BuilderConfig(
             name="wikitext103",
             version=VERSION,
             description="The big version",
-            data_dir="wikitext103
+            data_dir="wikitext103",
         ),
     ]
 
@@ -119,27 +119,27 @@
         )
 
     def _split_generators(self, dl_manager):
-        data_dir = dl_manager.download_and_extract(f"{_URL}{self.config.data_dir}")
+        data_dir = dl_manager.download_and_extract(f"{_URL}{self.config.data_dir}.zip")
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": os.path.join(data_dir, "train.parquet"),
+                    "filepath": os.path.join(data_dir, self.config.data_dir, "train.parquet"),
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": os.path.join(data_dir, "validation.parquet"),
+                    "filepath": os.path.join(data_dir, self.config.data_dir, "validation.parquet"),
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": os.path.join(data_dir, "test.parquet"),
+                    "filepath": os.path.join(data_dir, self.config.data_dir, "test.parquet"),
                 },
             ),
         ]
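
The fix has two parts: `download_and_extract` is now pointed at the actual zip archive (`{_URL}{data_dir}.zip`), and since each archive unpacks into a subdirectory named after the config, that subdirectory is joined into every parquet path. The three near-identical SplitGenerator blocks could equally be written as one comprehension; a sketch of that equivalent form, not the file's actual code:

```python
import os
import datasets

def _split_generators(self, dl_manager):
    # One archive per config: wikitext2.zip or wikitext103.zip.
    data_dir = dl_manager.download_and_extract(f"{_URL}{self.config.data_dir}.zip")
    # The zip unpacks into <data_dir>/<config's data_dir>/, so that folder
    # must be part of every split's parquet path.
    return [
        datasets.SplitGenerator(
            name=split,
            gen_kwargs={
                "filepath": os.path.join(data_dir, self.config.data_dir, f"{split}.parquet"),
            },
        )
        for split in ("train", "validation", "test")
    ]
```
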