Sub-tasks: rdf-to-text
Multilinguality: multilingual
Size Categories: 10K<n<100K
Language Creators: crowdsourced
Annotations Creators: found
albertvillanova (HF staff) committed
Commit e1f327f
1 Parent(s): 587a271

Create script

Files changed (1):
1. challenge-2023.py +175 -0

challenge-2023.py ADDED
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The WebNLG 2023 Challenge."""


import os
import xml.etree.ElementTree as ET
from collections import defaultdict

import datasets


_HOMEPAGE = "https://synalp.gitlabpages.inria.fr/webnlg-challenge/challenge_2023/"

_DESCRIPTION = """\
The WebNLG challenge consists in mapping data to text. The training data consists
of Data/Text pairs where the data is a set of triples extracted from DBpedia and the text is a verbalisation
of these triples. For instance, given the 3 DBpedia triples shown in (a), the aim is to generate a text such as (b).

a. (John_E_Blaha birthDate 1942_08_26) (John_E_Blaha birthPlace San_Antonio) (John_E_Blaha occupation Fighter_pilot)
b. John E Blaha, born in San Antonio on 1942-08-26, worked as a fighter pilot

As the example illustrates, the task involves specific NLG subtasks such as sentence segmentation
(how to chunk the input data into sentences), lexicalisation (of the DBpedia properties),
aggregation (how to avoid repetitions) and surface realisation
(how to build a syntactically correct and natural sounding text).
"""

_LICENSE = ""

_CITATION = """\
@inproceedings{web_nlg,
  author    = {Claire Gardent and
               Anastasia Shimorina and
               Shashi Narayan and
               Laura Perez{-}Beltrachini},
  editor    = {Regina Barzilay and
               Min{-}Yen Kan},
  title     = {Creating Training Corpora for {NLG} Micro-Planners},
  booktitle = {Proceedings of the 55th Annual Meeting of the
               Association for Computational Linguistics,
               {ACL} 2017, Vancouver, Canada, July 30 - August 4,
               Volume 1: Long Papers},
  pages     = {179--188},
  publisher = {Association for Computational Linguistics},
  year      = {2017},
  url       = {https://doi.org/10.18653/v1/P17-1017},
  doi       = {10.18653/v1/P17-1017}
}
"""

# From: https://github.com/WebNLG/2023-Challenge
_URL = "data.zip"

_LANGUAGES = ["br", "cy", "ga", "mt", "ru"]
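
# Note: _split_generators below expects the archive to unpack to
# data/{language}_train.xml and data/{language}_dev.xml, e.g. data/ru_train.xml.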


def et_to_dict(tree):
    """Recursively convert an ElementTree element into nested dicts keyed by tag."""
    dct = {tree.tag: {} if tree.attrib else None}
    children = list(tree)
    if children:
        # Group converted children by tag, so repeated tags become lists.
        dd = defaultdict(list)
        for dc in map(et_to_dict, children):
            for k, v in dc.items():
                dd[k].append(v)
        dct = {tree.tag: dd}
    if tree.attrib:
        # Attributes are merged into the same dict as the children.
        dct[tree.tag].update((k, v) for k, v in tree.attrib.items())
    if tree.text:
        text = tree.text.strip()
        if children or tree.attrib:
            if text:
                dct[tree.tag]["text"] = text
        else:
            # Leaf element: the stripped text itself becomes the value.
            dct[tree.tag] = text
    return dct
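
# Illustration added for clarity (a hypothetical element, not from the corpus):
# et_to_dict(ET.fromstring('<lex lid="Id1" lang="ru">Some text</lex>'))
# returns {"lex": {"lid": "Id1", "lang": "ru", "text": "Some text"}}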


def parse_entry(entry):
    """Flatten one <entry> dict (as produced by et_to_dict) into a flat example record."""
    res = {}
    otriple_set_list = entry["originaltripleset"]
    res["original_triple_sets"] = [{"otriple_set": otriple_set["otriple"]} for otriple_set in otriple_set_list]
    mtriple_set_list = entry["modifiedtripleset"]
    res["modified_triple_sets"] = [{"mtriple_set": mtriple_set["mtriple"]} for mtriple_set in mtriple_set_list]
    res["category"] = entry["category"]
    res["eid"] = entry["eid"]
    res["size"] = int(entry["size"])
    # Each lexicalisation field becomes a parallel list over the entry's <lex> elements.
    res["lex"] = {
        "comment": [ex.get("comment", "") for ex in entry.get("lex", [])],
        "lid": [ex.get("lid", "") for ex in entry.get("lex", [])],
        "text": [ex.get("text", "") for ex in entry.get("lex", [])],
        "lang": [ex.get("lang", "") for ex in entry.get("lex", [])],
    }
    res["shape"] = entry.get("shape", "")
    res["shape_type"] = entry.get("shape_type", "")
    return res
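
# Illustration added for clarity (hypothetical values): parse_entry returns
# a record shaped like
# {"category": "Airport", "eid": "Id1", "size": 1,
#  "original_triple_sets": [{"otriple_set": [...]}],
#  "modified_triple_sets": [{"mtriple_set": [...]}],
#  "lex": {"comment": [...], "lid": [...], "text": [...], "lang": [...]},
#  "shape": "", "shape_type": ""}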


def xml_file_to_examples(filename):
    """Parse one benchmark XML file and return its entries as flat example records."""
    tree = ET.parse(filename).getroot()
    examples = et_to_dict(tree)["benchmark"]["entries"][0]["entry"]
    return [parse_entry(entry) for entry in examples]


class Challenge2023(datasets.GeneratorBasedBuilder):
    """The WebNLG 2023 Challenge dataset."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [datasets.BuilderConfig(name=language) for language in _LANGUAGES]

    def _info(self):
        features = datasets.Features(
            {
                "category": datasets.Value("string"),
                "size": datasets.Value("int32"),
                "eid": datasets.Value("string"),
                "original_triple_sets": datasets.Sequence(
                    {"otriple_set": datasets.Sequence(datasets.Value("string"))}
                ),
                "modified_triple_sets": datasets.Sequence(
                    {"mtriple_set": datasets.Sequence(datasets.Value("string"))}
                ),
                "shape": datasets.Value("string"),
                "shape_type": datasets.Value("string"),
                "lex": datasets.Sequence(
                    {
                        "comment": datasets.Value("string"),
                        "lid": datasets.Value("string"),
                        "text": datasets.Value("string"),
                        "lang": datasets.Value("string"),
                    }
                ),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        data_dir = dl_manager.download_and_extract(_URL)
        splits = {datasets.Split.TRAIN: "train", datasets.Split.VALIDATION: "dev"}
        return [
            datasets.SplitGenerator(
                name=split,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "xml_file": os.path.join(data_dir, "data", f"{self.config.name}_{split_filename}.xml"),
                },
            )
            for split, split_filename in splits.items()
        ]

    def _generate_examples(self, xml_file):
        """Yields examples."""
        for id_, exple_dict in enumerate(xml_file_to_examples(xml_file)):
            yield id_, exple_dict
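
For reference, a minimal usage sketch (not part of this commit). It assumes a
local checkout where data.zip sits next to challenge-2023.py; "ru" is one of
the configs declared in _LANGUAGES.

from datasets import load_dataset

# On recent versions of the datasets library, loading a script-based dataset
# also requires passing trust_remote_code=True.
dataset = load_dataset("./challenge-2023.py", "ru")

example = dataset["train"][0]
print(example["modified_triple_sets"]["mtriple_set"][0])  # one set of RDF triples
print(example["lex"]["text"])  # its verbalisation(s)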