zhangir-azerbayev committed
Commit 70d5a43
Parent: 4e223b3

fix dataloader

Files changed (2):
  1. proof-pile-2.py  +10 -47
  2. test_dataloader.py  +1 -1
proof-pile-2.py CHANGED
@@ -19,29 +19,6 @@ A dataset of high quality mathematical text. """
 _HOMEPAGE = "https://github.com/EleutherAI/math-lm"
 
 
-SPLITS = ["train", "validation", "test"]
-
-_DATA_PATHS = {
-    "arxiv": {
-        split: [f'arxiv/{split}/arXiv_{str(i).zfill(3)}.jsonl.zst' for i in range(100)]
-        for split in SPLITS
-    },
-    "open-web-math": {
-        split: [
-            os.path.join(f"open-web-math/{split}", filename)
-            for filename in os.listdir(f"open-web-math/{split}")
-        ]
-        for split in SPLITS
-    },
-    "algebraic-stack": {
-        split: [
-            os.path.join(f"algebraic-stack/{split}", filename)
-            for filename in os.listdir(f"algebraic-stack/{split}")
-        ]
-        for split in SPLITS
-    }
-}
-
 class ProofPile2Config(datasets.BuilderConfig):
     """BuilderConfig for RedPajama sample."""
 
@@ -70,12 +47,6 @@ class ProofPile2(datasets.GeneratorBasedBuilder):
     # data = datasets.load_dataset('my_dataset', 'first_domain')
     # data = datasets.load_dataset('my_dataset', 'second_domain')
     BUILDER_CONFIGS = [
-        ProofPile2Config(
-            name='default',
-            subsets=list(_DATA_PATHS.keys()),
-            version=VERSION,
-            description="All subsets"
-        ),
         ProofPile2Config(
             name='arxiv',
             subsets=["arxiv"],
@@ -117,37 +88,29 @@ class ProofPile2(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "data_files": list(map(
-                        dl_manager.download,
-                        [x for subset in self.config.subsets for x in _DATA_PATHS[subset]["train"]]
-                    )),
+                    "data_files": dl_manager.iter_archive(os.path.join(subset, "train"))
                 },
             ),
             datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
+                name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "data_files": list(map(
-                        dl_manager.download,
-                        [x for subset in self.config.subsets for x in _DATA_PATHS[subset]["validation"]]
-                    )),
+                    "data_files": dl_manager.iter_archive(os.path.join(subset, "validation"))
                 },
             ),
             datasets.SplitGenerator(
-                name=datasets.Split.TEST,
+                name=datasets.Split.TRAIN,
+                # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "data_files": list(map(
-                        dl_manager.download,
-                        [x for subset in self.config.subsets for x in _DATA_PATHS[subset]["test"]]
-                    )),
-                },
-            ),
+                    "data_files": dl_manager.iter_archive(os.path.join(subset, "test"))
+                },
+            ),
         ]
     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
     def _generate_examples(self, data_files):
         key = 0
-        for name in data_files:
-            with zstd.open(open(name, "rb"), "rt", encoding="utf-8") as f:
+        for fle in data_files:
+            with zstd.open(fle, "rt", encoding="utf-8") as f:
                 for x in f.readlines():
                     instance = json.loads(x)
                     if instance:
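The new `_generate_examples` expects `data_files` to yield zstd-compressed JSONL shards. As a point of reference, here is a minimal standalone sketch (not part of the commit) of that read path, using the same text-mode `zstandard` open that the script's `zstd.open(...)` call relies on; the shard path is a placeholder modeled on the naming scheme of the removed `_DATA_PATHS` entries, and the yielded value is a simplification of whatever the builder actually emits.

import json
import zstandard as zstd  # the script's `zstd.open(...)` corresponds to this import


def read_jsonl_zst(path):
    """Yield (key, record) pairs from one .jsonl.zst shard: one JSON object per non-empty line."""
    key = 0
    with zstd.open(path, "rt", encoding="utf-8") as f:
        for line in f:
            instance = json.loads(line)
            if instance:  # skip empty records, as the builder does
                yield key, instance
                key += 1


# Placeholder path, following the removed _DATA_PATHS pattern:
# for key, instance in read_jsonl_zst("arxiv/train/arXiv_000.jsonl.zst"):
#     print(key, list(instance.keys()))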
test_dataloader.py CHANGED
@@ -7,7 +7,7 @@ import sentencepiece as spm
 s = spm.SentencePieceProcessor(model_file="tokenizer.model") # Llama tokenizer
 
 def main():
-    for subset in ["arxiv", "open-web-math", "algebraic-stack"]:
+    for subset in ["algebraic-stack", "arxiv", "open-web-math",]:
         for split in ["train", "validation", "test"]:
             data = load_dataset("proof-pile-2.py", subset)[split]
             print(data)
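For a quick manual check of the reworked loader, a hedged usage sketch (not part of the commit): load one subset through the local builder script and tokenize the first validation document with the same SentencePiece model `test_dataloader.py` uses. The `"text"` column name is an assumption based on Pile-style JSONL records, and `tokenizer.model` is assumed to sit next to the scripts, as in the test above.

from datasets import load_dataset
import sentencepiece as spm

# Load a single subset/split via the local builder script.
data = load_dataset("proof-pile-2.py", "algebraic-stack")["validation"]

# Tokenize the first document (assumes a "text" column and a local tokenizer.model).
sp = spm.SentencePieceProcessor(model_file="tokenizer.model")
ids = sp.encode(data[0]["text"])
print(f"first validation doc: {len(ids)} tokens")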