Datasets:

Languages:
French
Multilinguality:
monolingual
Size Categories:
10K<n<100K
Language Creators:
found
Annotations Creators:
found
Source Datasets:
original
ArXiv:
Tags:
License:
system HF staff committed on
Commit
2714efb
1 Parent(s): fe42f57

Update files from the datasets library (from 1.16.0)

Browse files

Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0

Files changed (2) hide show
  1. README.md +1 -0
  2. orange_sum.py +18 -14
README.md CHANGED
@@ -1,4 +1,5 @@
1
  ---
 
2
  annotations_creators:
3
  - found
4
  language_creators:
1
  ---
2
+ pretty_name: OrangeSum
3
  annotations_creators:
4
  - found
5
  language_creators:
orange_sum.py CHANGED
@@ -15,8 +15,6 @@
15
  """OrangeSum dataset"""
16
 
17
 
18
- import os
19
-
20
  import datasets
21
 
22
 
@@ -70,14 +68,15 @@ class OrangeSum(datasets.GeneratorBasedBuilder):
70
 
71
  def _split_generators(self, dl_manager):
72
  """Returns SplitGenerators."""
73
- data_dir = dl_manager.download_and_extract(_URL_DATA[self.config.name])
74
 
75
  return [
76
  datasets.SplitGenerator(
77
  name=datasets.Split.TRAIN,
78
  # These kwargs will be passed to _generate_examples
79
  gen_kwargs={
80
- "filepath": data_dir,
 
81
  "split": "train",
82
  },
83
  ),
@@ -85,7 +84,8 @@ class OrangeSum(datasets.GeneratorBasedBuilder):
85
  name=datasets.Split.TEST,
86
  # These kwargs will be passed to _generate_examples
87
  gen_kwargs={
88
- "filepath": data_dir,
 
89
  "split": "test",
90
  },
91
  ),
@@ -93,18 +93,22 @@ class OrangeSum(datasets.GeneratorBasedBuilder):
93
  name=datasets.Split.VALIDATION,
94
  # These kwargs will be passed to _generate_examples
95
  gen_kwargs={
96
- "filepath": data_dir,
 
97
  "split": "valid",
98
  },
99
  ),
100
  ]
101
 
102
- def _generate_examples(self, filepath, split):
103
  """Yields examples."""
104
- with open(
105
- os.path.join(filepath, self.config.name, "{}.source".format(split)), encoding="utf-8"
106
- ) as f_source, open(
107
- os.path.join(filepath, self.config.name, "{}.target".format(split)), encoding="utf-8"
108
- ) as f_target:
109
- for idx, (document, summary) in enumerate(zip(f_source, f_target)):
110
- yield idx, {_DOCUMENT: document, _SUMMARY: summary}
 
 
 
15
  """OrangeSum dataset"""
16
 
17
 
 
 
18
  import datasets
19
 
20
 
68
 
69
  def _split_generators(self, dl_manager):
70
  """Returns SplitGenerators."""
71
+ archive = dl_manager.download(_URL_DATA[self.config.name])
72
 
73
  return [
74
  datasets.SplitGenerator(
75
  name=datasets.Split.TRAIN,
76
  # These kwargs will be passed to _generate_examples
77
  gen_kwargs={
78
+ "source_files": dl_manager.iter_archive(archive),
79
+ "target_files": dl_manager.iter_archive(archive),
80
  "split": "train",
81
  },
82
  ),
84
  name=datasets.Split.TEST,
85
  # These kwargs will be passed to _generate_examples
86
  gen_kwargs={
87
+ "source_files": dl_manager.iter_archive(archive),
88
+ "target_files": dl_manager.iter_archive(archive),
89
  "split": "test",
90
  },
91
  ),
93
  name=datasets.Split.VALIDATION,
94
  # These kwargs will be passed to _generate_examples
95
  gen_kwargs={
96
+ "source_files": dl_manager.iter_archive(archive),
97
+ "target_files": dl_manager.iter_archive(archive),
98
  "split": "valid",
99
  },
100
  ),
101
  ]
102
 
103
+ def _generate_examples(self, source_files, target_files, split):
104
  """Yields examples."""
105
+ expected_source_path = f"{self.config.name}/{split}.source"
106
+ expected_target_path = f"{self.config.name}/{split}.target"
107
+ for source_path, f_source in source_files:
108
+ if source_path == expected_source_path:
109
+ for target_path, f_target in target_files:
110
+ if target_path == expected_target_path:
111
+ for idx, (document, summary) in enumerate(zip(f_source, f_target)):
112
+ yield idx, {_DOCUMENT: document.decode("utf-8"), _SUMMARY: summary.decode("utf-8")}
113
+ break
114
+ break