Datasets:
tau
/

Modalities:
Text
Libraries:
Datasets
yuvalkirstain committed on
Commit
10f525a
1 Parent(s): a1fa43e

trying to pass max_source_length attr to dataset loader

Browse files
Files changed (1) hide show
  1. fs.py +4 -1
fs.py CHANGED
@@ -136,6 +136,7 @@ class Fs(datasets.GeneratorBasedBuilder):
136
  gen_kwargs={
137
  "data_file": os.path.join(dl_dir, "train.jsonl"),
138
  "split": datasets.Split.TRAIN,
 
139
  },
140
  ),
141
  datasets.SplitGenerator(
@@ -143,6 +144,7 @@ class Fs(datasets.GeneratorBasedBuilder):
143
  gen_kwargs={
144
  "data_file": os.path.join(dl_dir, "validation.jsonl"),
145
  "split": datasets.Split.VALIDATION,
 
146
  },
147
  ),
148
  datasets.SplitGenerator(
@@ -150,11 +152,12 @@ class Fs(datasets.GeneratorBasedBuilder):
150
  gen_kwargs={
151
  "data_file": os.path.join(dl_dir, "test.jsonl") if data_files is None else data_files["test"],
152
  "split": datasets.Split.TEST,
 
153
  },
154
  ),
155
  ]
156
 
157
- def _generate_examples(self, data_file, split):
158
  with open(data_file, encoding="utf-8") as f:
159
  for line in f:
160
  row = json.loads(line)
 
136
  gen_kwargs={
137
  "data_file": os.path.join(dl_dir, "train.jsonl"),
138
  "split": datasets.Split.TRAIN,
139
+ "max_source_length": self.config.max_source_length,
140
  },
141
  ),
142
  datasets.SplitGenerator(
 
144
  gen_kwargs={
145
  "data_file": os.path.join(dl_dir, "validation.jsonl"),
146
  "split": datasets.Split.VALIDATION,
147
+ "max_source_length": self.config.max_source_length,
148
  },
149
  ),
150
  datasets.SplitGenerator(
 
152
  gen_kwargs={
153
  "data_file": os.path.join(dl_dir, "test.jsonl") if data_files is None else data_files["test"],
154
  "split": datasets.Split.TEST,
155
+ "max_source_length": self.config.max_source_length,
156
  },
157
  ),
158
  ]
159
 
160
+ def _generate_examples(self, data_file, split, max_source_length):
161
  with open(data_file, encoding="utf-8") as f:
162
  for line in f:
163
  row = json.loads(line)