Datasets:

Multilinguality:
multilingual
Size Categories:
1M<n<10M
Language Creators:
found
Annotations Creators:
found
Source Datasets:
original
ArXiv:
License:
lhoestq HF staff committed on
Commit
7c257e9
1 Parent(s): 911f635

Update xlsum.py

Browse files
Files changed (1) hide show
  1. xlsum.py +5 -5
xlsum.py CHANGED
@@ -133,30 +133,30 @@ class Xlsum(datasets.GeneratorBasedBuilder):
133
  datasets.SplitGenerator(
134
  name=datasets.Split.TRAIN,
135
  gen_kwargs={
136
- "filepath": lang + "_train.jsonl",
137
  "files": dl_manager.iter_archive(archive),
138
  },
139
  ),
140
  datasets.SplitGenerator(
141
  name=datasets.Split.TEST,
142
  gen_kwargs={
143
- "filepath": lang + "_test.jsonl",
144
  "files": dl_manager.iter_archive(archive),
145
  },
146
  ),
147
  datasets.SplitGenerator(
148
  name=datasets.Split.VALIDATION,
149
  gen_kwargs={
150
- "filepath": lang + "_val.jsonl",
151
  "files": dl_manager.iter_archive(archive),
152
  },
153
  ),
154
  ]
155
 
156
- def _generate_examples(self, filepath, files):
157
  """Yields examples as (key, example) tuples."""
158
  for path, f in files:
159
- if path == filepath:
160
  for idx_, row in enumerate(f):
161
  data = json.loads(row.decode("utf-8"))
162
  yield idx_, {
 
133
  datasets.SplitGenerator(
134
  name=datasets.Split.TRAIN,
135
  gen_kwargs={
136
+ "filename": lang + "_train.jsonl",
137
  "files": dl_manager.iter_archive(archive),
138
  },
139
  ),
140
  datasets.SplitGenerator(
141
  name=datasets.Split.TEST,
142
  gen_kwargs={
143
+ "filename": lang + "_test.jsonl",
144
  "files": dl_manager.iter_archive(archive),
145
  },
146
  ),
147
  datasets.SplitGenerator(
148
  name=datasets.Split.VALIDATION,
149
  gen_kwargs={
150
+ "filename": lang + "_val.jsonl",
151
  "files": dl_manager.iter_archive(archive),
152
  },
153
  ),
154
  ]
155
 
156
+ def _generate_examples(self, filename, files):
157
  """Yields examples as (key, example) tuples."""
158
  for path, f in files:
159
+ if os.path.basename(path) == filename:
160
  for idx_, row in enumerate(f):
161
  data = json.loads(row.decode("utf-8"))
162
  yield idx_, {