lewtun (HF staff) committed
Commit 37d39fe
1 Parent(s): 055d1cd

Add loading script and dataset infos

Files changed (2):
  1. dataset_infos.json +1 -0
  2. mlsum.py +159 -0
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"de": {"description": "This is the MLSUM subset of the GEM benchmark. MLSUM is the first large-scale MultiLingual SUMmarization dataset.\nObtained from online newspapers, it contains 1.5M+ article/summary pairs in five different languages -- namely, French, German, Spanish, Russian, Turkish.\nTogether with English newspapers from the popular CNN/Daily mail dataset, the collected data form a large scale multilingual dataset which can enable new research directions for the text summarization community.\nWe report cross-lingual comparative analyses based on state-of-the-art systems.\nThese highlight existing biases which motivate the use of a multi-lingual dataset.\n", "citation": "@article{scialom2020mlsum,\n title={MLSUM: The Multilingual Summarization Corpus},\n author={Scialom, Thomas and Dray, Paul-Alexis and Lamprier, Sylvain and Piwowarski, Benjamin and Staiano, Jacopo},\n journal={arXiv preprint arXiv:2004.14900},\n year={2020}\n}\n", "homepage": "", "license": "", "features": {"gem_id": {"dtype": "string", "id": null, "_type": "Value"}, "gem_parent_id": {"dtype": "string", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}, "topic": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "date": {"dtype": "string", "id": null, "_type": "Value"}, "target": {"dtype": "string", "id": null, "_type": "Value"}, "references": [{"dtype": "string", "id": null, "_type": "Value"}]}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "mlsum", "config_name": "de", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 855411361, "num_examples": 220748, "dataset_name": "mlsum"}, "validation": {"name": "validation", "num_bytes": 49576087, "num_examples": 11392, "dataset_name": "mlsum"}, "test": {"name": "test", "num_bytes": 49018014, "num_examples": 10695, "dataset_name": "mlsum"}, "challenge_train_sample": {"name": "challenge_train_sample", "num_bytes": 1891220, "num_examples": 500, "dataset_name": "mlsum"}, "challenge_validation_sample": {"name": "challenge_validation_sample", "num_bytes": 2199723, "num_examples": 500, "dataset_name": "mlsum"}, "challenge_test_covid": {"name": "challenge_test_covid", "num_bytes": 19710589, "num_examples": 5058, "dataset_name": "mlsum"}}, "download_checksums": {"https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/de_train.zip": {"num_bytes": 311059697, "checksum": "88e788437bae48af6b3d18a554af4b2794cc6143a137df3f56daa91a37e3ea7e"}, "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/de_val.zip": {"num_bytes": 17771216, "checksum": "732620c32e1d3f393ee3193f57f1217d8549499eb4906e144252aaab39aa910b"}, "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/de_test.zip": {"num_bytes": 17741147, "checksum": "447e3b1839ab94d5700cc2aedc0b52521404865b2589656acc90a654ed0de4ff"}, "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_mlsum_bad_ids_fixed.json": {"num_bytes": 784429, "checksum": "7d1b5c340329da32b3a6c1b880e9d72b5193eb0782bc03261e4eaee08c3d5b64"}, "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/mlsum_de.zip": {"num_bytes": 15427039, "checksum": "7cc6751d7a76e833e0db27ce0b06a50be4df43dfd5d5284dd11888439a126310"}}, "download_size": 362783528, "post_processing_size": null, "dataset_size": 977806994, "size_in_bytes": 
1340590522}, "es": {"description": "This is the MLSUM subset of the GEM benchmark. MLSUM is the first large-scale MultiLingual SUMmarization dataset.\nObtained from online newspapers, it contains 1.5M+ article/summary pairs in five different languages -- namely, French, German, Spanish, Russian, Turkish.\nTogether with English newspapers from the popular CNN/Daily mail dataset, the collected data form a large scale multilingual dataset which can enable new research directions for the text summarization community.\nWe report cross-lingual comparative analyses based on state-of-the-art systems.\nThese highlight existing biases which motivate the use of a multi-lingual dataset.\n", "citation": "@article{scialom2020mlsum,\n title={MLSUM: The Multilingual Summarization Corpus},\n author={Scialom, Thomas and Dray, Paul-Alexis and Lamprier, Sylvain and Piwowarski, Benjamin and Staiano, Jacopo},\n journal={arXiv preprint arXiv:2004.14900},\n year={2020}\n}\n", "homepage": "", "license": "", "features": {"gem_id": {"dtype": "string", "id": null, "_type": "Value"}, "gem_parent_id": {"dtype": "string", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}, "topic": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "date": {"dtype": "string", "id": null, "_type": "Value"}, "target": {"dtype": "string", "id": null, "_type": "Value"}, "references": [{"dtype": "string", "id": null, "_type": "Value"}]}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "mlsum", "config_name": "es", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1208122300, "num_examples": 259888, "dataset_name": "mlsum"}, "validation": {"name": "validation", "num_bytes": 51491999, "num_examples": 9977, "dataset_name": "mlsum"}, "test": {"name": "test", "num_bytes": 71957172, "num_examples": 13366, "dataset_name": "mlsum"}, "challenge_train_sample": {"name": "challenge_train_sample", "num_bytes": 2363443, "num_examples": 500, "dataset_name": "mlsum"}, "challenge_validation_sample": {"name": "challenge_validation_sample", "num_bytes": 2655596, "num_examples": 500, "dataset_name": "mlsum"}, "challenge_test_covid": {"name": "challenge_test_covid", "num_bytes": 13553368, "num_examples": 1938, "dataset_name": "mlsum"}}, "download_checksums": {"https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/es_train.zip": {"num_bytes": 466443036, "checksum": "a01f4b4b873aa6cdeae15952a22ede2146734d0b60e7297470a35956507c863a"}, "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/es_val.zip": {"num_bytes": 19483214, "checksum": "e38fce9950008ec4b48963692891c4c94d51a1e307286fb596e093aeb1230c92"}, "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/es_test.zip": {"num_bytes": 27386169, "checksum": "177cfcf358bc4aa9bce2753b8e9de4f6eb41d2c30b1a99ef29d64e70537a1c0d"}, "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_mlsum_bad_ids_fixed.json": {"num_bytes": 784429, "checksum": "7d1b5c340329da32b3a6c1b880e9d72b5193eb0782bc03261e4eaee08c3d5b64"}, "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/mlsum_es.zip": {"num_bytes": 11524578, "checksum": "a254972fef695970aea0370be64fed6aec8c8b760f238fabd0e8f363bf8274cd"}}, "download_size": 525621426, "post_processing_size": null, "dataset_size": 1350143878, 
"size_in_bytes": 1875765304}}
mlsum.py ADDED
@@ -0,0 +1,159 @@
+ import json
+ import os
+
+ import datasets
+
+ _CITATION = """\
+ @article{scialom2020mlsum,
+   title={MLSUM: The Multilingual Summarization Corpus},
+   author={Scialom, Thomas and Dray, Paul-Alexis and Lamprier, Sylvain and Piwowarski, Benjamin and Staiano, Jacopo},
+   journal={arXiv preprint arXiv:2004.14900},
+   year={2020}
+ }
+ """
+
+ _DESCRIPTION = """\
+ This is the MLSUM subset of the GEM benchmark. MLSUM is the first large-scale MultiLingual SUMmarization dataset.
+ Obtained from online newspapers, it contains 1.5M+ article/summary pairs in five different languages -- namely, French, German, Spanish, Russian, Turkish.
+ Together with English newspapers from the popular CNN/Daily mail dataset, the collected data form a large scale multilingual dataset which can enable new research directions for the text summarization community.
+ We report cross-lingual comparative analyses based on state-of-the-art systems.
+ These highlight existing biases which motivate the use of a multi-lingual dataset.
+ """
+ _URL = "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/"
+ _LANG = ["de", "es"]
+ _URLs = {
+     "de": {
+         "train": "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/de_train.zip",
+         "validation": "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/de_val.zip",
+         "test": "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/de_test.zip",
+         "bad_ids": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_mlsum_bad_ids_fixed.json",
+         "challenge_set": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/mlsum_de.zip",
+     },
+     "es": {
+         "train": "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/es_train.zip",
+         "validation": "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/es_val.zip",
+         "test": "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/es_test.zip",
+         "bad_ids": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_mlsum_bad_ids_fixed.json",
+         "challenge_set": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/mlsum_es.zip",
+     },
+ }
+
+
+ class Mlsum(datasets.GeneratorBasedBuilder):
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             name=lang,
+             version=datasets.Version("1.0.0"),
+             description="",
+         )
+         for lang in _LANG
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "gem_id": datasets.Value("string"),
+                     "gem_parent_id": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                     "topic": datasets.Value("string"),
+                     "url": datasets.Value("string"),
+                     "title": datasets.Value("string"),
+                     "date": datasets.Value("string"),
+                     "target": datasets.Value("string"),
+                     "references": [datasets.Value("string")],
+                 }
+             ),
+             supervised_keys=None,
+             homepage="",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         dl_dir = dl_manager.download_and_extract(_URLs[self.config.name])
+         lang = str(self.config.name)
+         challenge_sets = [
+             ("challenge_train_sample", f"train_mlsum_{lang}_RandomSample500.json"),
+             ("challenge_validation_sample", f"validation_mlsum_{lang}_RandomSample500.json"),
+             ("challenge_test_covid", f"{lang}_test_covid19_cleaned.jsonl"),
+         ]
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": os.path.join(dl_dir["train"], lang + "_train.jsonl"),
+                     "split": "train",
+                     "lang": lang,
+                     "filepaths": dl_dir["bad_ids"],
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "filepath": os.path.join(dl_dir["validation"], lang + "_val.jsonl"),
+                     "split": "validation",
+                     "lang": lang,
+                     "filepaths": dl_dir["bad_ids"],
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": os.path.join(dl_dir["test"], lang + "_test.jsonl"),
+                     "split": "test",
+                     "lang": lang,
+                     "filepaths": dl_dir["bad_ids"],
+                 },
+             ),
+         ] + [
+             datasets.SplitGenerator(
+                 name=challenge_split,
+                 gen_kwargs={
+                     "filepath": os.path.join(dl_dir["challenge_set"], f"mlsum_{self.config.name}", filename),
+                     "split": challenge_split,
+                 },
+             )
+             for challenge_split, filename in challenge_sets
+         ]
+
+     def _generate_examples(self, filepath, split, filepaths=None, lang=None):
+         """Yields examples."""
+         if split in ["train", "validation", "test", "challenge_test_covid"]:
+             if split == "challenge_test_covid":
+                 bad_ids = {}
+             else:
+                 bad_ids_dct = json.load(open(filepaths, encoding="utf-8"))
+                 bad_ids = dict((bad_url, True) for _, bad_url in bad_ids_dct[f"{lang}-{split}"])
+             with open(filepath, encoding="utf-8") as f:
+                 id_ = -1
+                 for line in f:
+                     data = json.loads(line)
+                     if data["url"] in bad_ids:
+                         continue
+                     else:
+                         id_ += 1
+                         yield id_, {
+                             "gem_id": f"{self.config.name}-{split}-{id_}",
+                             "gem_parent_id": f"{self.config.name}-{split}-{id_}",
+                             "text": data["text"],
+                             "target": data["summary"],
+                             "references": [] if split == "train" else [data["summary"]],
+                             "topic": data["topic"],
+                             "url": data["url"],
+                             "title": data["title"],
+                             "date": data["date"],
+                         }
+         else:
+             exples = json.load(open(filepath, encoding="utf-8"))
+             if isinstance(exples, dict):
+                 assert len(exples) == 1, "multiple entries found"
+                 exples = list(exples.values())[0]
+             for id_, exple in enumerate(exples):
+                 if len(exple) == 0:
+                     continue
+                 exple["gem_parent_id"] = exple["gem_id"]
+                 exple["gem_id"] = f"{self.config.name}-{split}-{id_}"
+                 yield id_, exple
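
For reference, a minimal usage sketch of the new loading script. It is not part of the commit and assumes the script is run from a local checkout with a datasets version that still accepts local dataset scripts:

from datasets import load_dataset

# "de" and "es" are the two configs declared in _LANG above.
ds = load_dataset("mlsum.py", "de")

# Standard splits plus the challenge sets declared in _split_generators.
print(ds)
example = ds["validation"][0]
print(example["title"])
print(example["target"])      # reference summary
print(example["references"])  # [] for the train split, [summary] otherwise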