Datasets:

Languages:
English
Size Categories:
10B<n<100B
ArXiv:
Tags:
math
zhangir-azerbayev committed on
Commit
2ddb8e5
1 Parent(s): cf4cc41

fix dataloader

Browse files
Files changed (2) hide show
  1. proof-pile-2.py +13 -24
  2. test_dataloader.py +1 -1
proof-pile-2.py CHANGED
@@ -114,36 +114,25 @@ class ProofPile2(datasets.GeneratorBasedBuilder):
114
  def _split_generators(self, dl_manager):
115
  return [
116
  datasets.SplitGenerator(
117
- name=datasets.Split.TRAIN,
118
  # These kwargs will be passed to _generate_examples
119
  gen_kwargs={
120
  "data_files": list(map(
121
  dl_manager.download,
122
- [x for subset in self.config.subsets for x in _DATA_PATHS[subset]["train"]]
123
- )),
 
 
 
 
124
  },
125
- ),
126
- datasets.SplitGenerator(
127
- name=datasets.Split.VALIDATION,
128
- # These kwargs will be passed to _generate_examples
129
- gen_kwargs={
130
- "data_files": list(map(
131
- dl_manager.download,
132
- [x for subset in self.config.subsets for x in _DATA_PATHS[subset]["validation"]]
133
- )),
134
- },
135
- ),
136
- datasets.SplitGenerator(
137
- name=datasets.Split.TEST,
138
- gen_kwargs={
139
- "data_files": list(map(
140
- dl_manager.download,
141
- [x for subset in self.config.subsets for x in _DATA_PATHS[subset]["test"]]
142
- )),
143
- },
144
- ),
145
  ]
146
- # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
147
  def _generate_examples(self, data_files):
148
  key = 0
149
  for name in data_files:
 
114
  def _split_generators(self, dl_manager):
115
  return [
116
  datasets.SplitGenerator(
117
+ name=split_obj,
118
  # These kwargs will be passed to _generate_examples
119
  gen_kwargs={
120
  "data_files": list(map(
121
  dl_manager.download,
122
+ [
123
+ x
124
+ for subset in self.config.subsets
125
+ for x in dl_manager.iter_files(os.path.join(subset, split))
126
+ ]
127
+ ))
128
  },
129
+ )
130
+ for split, split_obj in zip(
131
+ ("train", "validation", "test"),
132
+ (datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST)
133
+ )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
134
  ]
135
+
136
  def _generate_examples(self, data_files):
137
  key = 0
138
  for name in data_files:
test_dataloader.py CHANGED
@@ -7,7 +7,7 @@ import sentencepiece as spm
7
  s = spm.SentencePieceProcessor(model_file="tokenizer.model") # Llama tokenizer
8
 
9
  def main():
10
- for subset in ["arxiv", "open-web-math", "algebraic-stack"]:
11
  for split in ["train", "validation", "test"]:
12
  data = load_dataset("proof-pile-2.py", subset)[split]
13
  print(data)
 
7
  s = spm.SentencePieceProcessor(model_file="tokenizer.model") # Llama tokenizer
8
 
9
  def main():
10
+ for subset in ["algebraic-stack", "arxiv", "open-web-math",]:
11
  for split in ["train", "validation", "test"]:
12
  data = load_dataset("proof-pile-2.py", subset)[split]
13
  print(data)