dlwh committed on
Commit
62078bd
1 Parent(s): 22c346d
.gitattributes CHANGED
@@ -35,3 +35,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.mp3 filter=lfs diff=lfs merge=lfs -text
  *.ogg filter=lfs diff=lfs merge=lfs -text
  *.wav filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
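(This is the rule that `git lfs track "*.zst"` writes: with it in place, the `.zst` shards added below are stored through Git LFS, so the repository itself only records three-line pointer stubs rather than the compressed data.)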
data/test/test.jsonl.zst ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bc2fa558a1a2ab413a9403ed35903b8857eb52181b976eabea2d4f77dd220a61
+ size 406084
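The three `+` lines are a Git LFS pointer, not the data itself: `oid` is the SHA-256 of the stored object and `size` is its byte length. A minimal sketch, assuming the archive has already been fetched to the path shown, of checking a downloaded object against its pointer (the helper name is illustrative, not part of this repo's tooling):

```python
# Sketch: verify a fetched LFS object against the pointer's oid and size.
# The path and helper name are illustrative, not part of this repo's tooling.
import hashlib
import os


def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    """Return True iff the file matches the pointer's byte size and sha256 oid."""
    if os.path.getsize(path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            digest.update(chunk)
    return digest.hexdigest() == expected_oid


# Values copied from the pointer above:
print(verify_lfs_object(
    "data/test/test.jsonl.zst",
    "bc2fa558a1a2ab413a9403ed35903b8857eb52181b976eabea2d4f77dd220a61",
    406084,
))
```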
data/train/train_0_of_8.jsonl.zst ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8524249d8b3fc75b2dec22710b80d7a63b657b4c33b7c1de50779104b4624b2f
+ size 22127451
data/train/train_1_of_8.jsonl.zst ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:718215b95e73ecfc601baa404eb22cc5652a04cafe0e6c9bde746dd0655f51b9
+ size 22135538
data/train/train_2_of_8.jsonl.zst ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:808b3fd5982cd1c03d0544e9bd0cf1b2150a1baf4dcda62395eafcfdde56bdd2
+ size 22166917
data/train/train_3_of_8.jsonl.zst ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:97a15dd05c9f6eb5eafc9c0f4e16621536cde7acf4c60e3d7d9767a27a13c9bd
+ size 22258127
data/train/train_4_of_8.jsonl.zst ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0cbc2ac7c58a823a11310d795342bfe55ef24b8a8b27ba37717d1b8ebcc8cad4
+ size 22270070
data/train/train_5_of_8.jsonl.zst ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a9caae63f5039c7a2541bac204f33d53543db40393f8db109812af1b8f50c146
+ size 22331298
data/train/train_6_of_8.jsonl.zst ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:36161abfad1e5ccba2accab0bacdf46fb2e429559bae3918d0548d964fbf8013
+ size 22271652
data/train/train_7_of_8.jsonl.zst ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:abe13bbcde7e10fc599d3b30e5b5adc26cc129eac87316c95ed94c40496431fb
+ size 22251069
data/validation/validation.jsonl.zst ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:32388f7ea5aea43085e23afcbbac083d785ac5628fc816d861026111b07e85d6
+ size 365219
dataset.py ADDED
@@ -0,0 +1,59 @@
+ # HuggingFace loading script for generic sprucfluo datasets
+ # This script was automatically generated by convert_hf_to_sprucfluo
+ import json
+ import pathlib
+
+ import datasets
+ import fsspec
+ from datasets import DatasetInfo, Value, Features
+
+ logger = datasets.logging.get_logger(__name__)
+
+ _INFO = DatasetInfo(
+     description='Automatically generated for wikitext (wikitext-103-raw-v1), split into 8 shards, detokenized.\n\nOriginal Description:\n The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified\n Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike\n License.\n',
+     citation='@misc{merity2016pointer,\n title={Pointer Sentinel Mixture Models},\n author={Stephen Merity and Caiming Xiong and James Bradbury and Richard Socher},\n year={2016},\n eprint={1609.07843},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n',
+     homepage='https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/',
+     license='Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)',
+     version="1.0.0",
+     features=Features.from_dict({'text': {'dtype': 'string', 'id': None, '_type': 'Value'}}),
+     supervised_keys=None)
+
+
+ class AutoDataset(datasets.GeneratorBasedBuilder):
+     BUILDER_CONFIGS = [datasets.BuilderConfig()]
+
+     def __init__(self, **kwargs):
+         super().__init__(**kwargs)
+
+     def _info(self):
+         return _INFO
+
+     @property
+     def dataset_dir(self):
+         return pathlib.Path(__file__).parent
+
+     def _split_generators(self, dl_manager):
+         metadata = json.load(open(dl_manager.download("metadata.json"), 'rt'))
+         return [
+             datasets.SplitGenerator(
+                 name=split,
+                 gen_kwargs={"filepaths": dl_manager.download(split_metadata["files"])},
+             )
+             for split, split_metadata in metadata["splits"].items()
+         ]
+
+     def _generate_examples(self, filepaths):
+         """This function returns the examples in the raw (text) form by iterating on all the files."""
+         id_: int = 0
+         for filepath in filepaths:
+             logger.info(f"Generating examples from {filepath}")
+             with fsspec.open(filepath, mode="rt", compression="infer", encoding="utf-8") as f:
+                 for line in f:
+                     if line:
+                         example = json.loads(line)
+                         yield id_, example
+                         id_ += 1
+
+
+ if __name__ == "__main__":
+     AutoDataset().download_and_prepare()
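Because `AutoDataset` is a standard `GeneratorBasedBuilder`, the repository can be consumed with `datasets.load_dataset`. A minimal usage sketch; the repo id is a placeholder, and newer versions of `datasets` may also require `trust_remote_code=True` for script-backed datasets. Since `_generate_examples` reads shards through `fsspec`, streaming mode should work as well:

```python
# Usage sketch; "user/this-dataset" is a placeholder repo id, not the real one.
from itertools import islice

from datasets import load_dataset

# Eager load: downloads every shard listed in metadata.json and builds Arrow files.
ds = load_dataset("user/this-dataset", split="validation")
print(ds[0]["text"][:80])

# Streaming load: reads the .zst shards lazily instead of materializing them first.
stream = load_dataset("user/this-dataset", split="train", streaming=True)
for example in islice(stream, 3):
    print(example["text"][:80])
```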
metadata.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "name": "wikitext-wikitext-103-raw-v1",
+   "splits": {
+     "test": {
+       "files": [
+         "data/test/test.jsonl.zst"
+       ]
+     },
+     "train": {
+       "files": [
+         "data/train/train_0_of_8.jsonl.zst",
+         "data/train/train_1_of_8.jsonl.zst",
+         "data/train/train_2_of_8.jsonl.zst",
+         "data/train/train_3_of_8.jsonl.zst",
+         "data/train/train_4_of_8.jsonl.zst",
+         "data/train/train_5_of_8.jsonl.zst",
+         "data/train/train_6_of_8.jsonl.zst",
+         "data/train/train_7_of_8.jsonl.zst"
+       ]
+     },
+     "validation": {
+       "files": [
+         "data/validation/validation.jsonl.zst"
+       ]
+     }
+   }
+ }
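`metadata.json` is the single input to `_split_generators` above: each key under `"splits"` becomes a split name, and each listed path becomes one shard to download. A minimal sketch, assuming it runs from the repo root with the `zstandard` package installed (so fsspec can infer `.zst` compression), that walks the same structure directly:

```python
# Sketch: read metadata.json directly and peek at the first validation record.
# Assumes the repo root as working directory and `zstandard` installed.
import json

import fsspec

with open("metadata.json", "rt") as f:
    metadata = json.load(f)

for split, split_meta in metadata["splits"].items():
    print(f"{split}: {len(split_meta['files'])} shard(s)")

# Mirror _generate_examples for a single shard.
first_file = metadata["splits"]["validation"]["files"][0]
with fsspec.open(first_file, mode="rt", compression="infer", encoding="utf-8") as f:
    record = json.loads(f.readline())
    print(record["text"][:80])
```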