kasnerz committed
Commit ed20eea
1 Parent(s): 5750300

Delete _cacapo.py

Files changed (1):
_cacapo.py (+0, -132)
_cacapo.py DELETED
@@ -1,132 +0,0 @@
- #!/usr/bin/env python3
-
- """
- The script used to load the dataset from the original source.
- """
-
- import os
- import xml.etree.ElementTree as ET
- from collections import defaultdict
-
- import datasets
-
- _CITATION = """\
- @inproceedings{van2020cacapo,
-     title={The CACAPO dataset: A multilingual, multi-domain dataset for neural pipeline and end-to-end data-to-text generation},
-     author={van der Lee, Chris and Emmery, Chris and Wubben, Sander and Krahmer, Emiel},
-     booktitle={Proceedings of the 13th International Conference on Natural Language Generation},
-     pages={68--79},
-     year={2020}
- }
- """
-
- _DESCRIPTION = """\
- CACAPO is a data-to-text dataset that contains sentences from news reports in the sports, weather, stocks, and incidents domains, in English and Dutch, aligned with relevant attribute-value paired data. It is the first data-to-text dataset based on "naturally occurring" human-written texts (i.e., texts that were not collected in a task-based setting) that covers multiple domains as well as multiple languages."""
- _URL = "https://github.com/TallChris91/CACAPO-Dataset"
- _LICENSE = "CC BY 4.0"
-
-
- def et_to_dict(tree):
-     """Recursively convert an ElementTree node into a nested dict."""
-     dct = {tree.tag: {} if tree.attrib else None}
-     children = list(tree)
-     if children:
-         dd = defaultdict(list)
-         for dc in map(et_to_dict, children):
-             for k, v in dc.items():
-                 dd[k].append(v)
-         dct = {tree.tag: dd}
-     if tree.attrib:
-         dct[tree.tag].update((k, v) for k, v in tree.attrib.items())
-     if tree.text:
-         text = tree.text.strip()
-         if children or tree.attrib:
-             if text:
-                 dct[tree.tag]["text"] = text
-         else:
-             dct[tree.tag] = text
-     return dct
-
-
- def parse_entry(entry):
-     """Flatten one <entry> dict into the feature schema declared in _info()."""
-     res = {}
-     otriple_set_list = entry["originaltripleset"]
-     res["original_triple_sets"] = [{"otriple_set": otriple_set["otriple"]} for otriple_set in otriple_set_list]
-     mtriple_set_list = entry["modifiedtripleset"]
-     res["modified_triple_sets"] = [{"mtriple_set": mtriple_set["mtriple"]} for mtriple_set in mtriple_set_list]
-     res["category"] = entry["category"]
-     res["eid"] = entry["eid"]
-     res["size"] = int(entry["size"])
-     res["lex"] = {
-         "comment": [ex.get("comment", "") for ex in entry.get("lex", [])],
-         "lid": [ex.get("lid", "") for ex in entry.get("lex", [])],
-         "text": [ex.get("text", "") for ex in entry.get("lex", [])],
-     }
-     return res
-
-
- def xml_file_to_examples(filename):
-     """Parse one WebNLG-format XML file into a list of example dicts."""
-     tree = ET.parse(filename).getroot()
-     examples = et_to_dict(tree)["benchmark"]["entries"][0]["entry"]
-     return [parse_entry(entry) for entry in examples]
-
-
- class CACAPO(datasets.GeneratorBasedBuilder):
-     VERSION = datasets.Version("1.0.0")
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "category": datasets.Value("string"),
-                     "lang": datasets.Value("string"),
-                     "size": datasets.Value("int32"),
-                     "eid": datasets.Value("string"),
-                     "original_triple_sets": datasets.Sequence(
-                         {"otriple_set": datasets.Sequence(datasets.Value("string"))}
-                     ),
-                     "modified_triple_sets": datasets.Sequence(
-                         {"mtriple_set": datasets.Sequence(datasets.Value("string"))}
-                     ),
-                     "lex": datasets.Sequence(
-                         {
-                             "comment": datasets.Value("string"),
-                             "lid": datasets.Value("string"),
-                             "text": datasets.Value("string"),
-                         }
-                     ),
-                 }
-             ),
-             supervised_keys=None,
-             homepage=_URL,
-             citation=_CITATION,
-             license=_LICENSE,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         filedirs = ["Incidents", "Sports", "Stocks", "Weather"]
-         return [
-             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filedirs": filedirs, "split": "train"}),
-             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filedirs": filedirs, "split": "dev"}),
-             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filedirs": filedirs, "split": "test"}),
-         ]
-
-     def _generate_examples(self, filedirs, split):
-         """Yields examples."""
-         id_ = 0
-         for lang in ["en", "nl"]:
-             for filedir in filedirs:
-                 xml_file = os.path.join(lang, filedir, f"WebNLGFormat{split.title()}.xml")
-                 for example_dict in xml_file_to_examples(xml_file):
-                     example_dict["category"] = filedir
-                     example_dict["lang"] = lang
-                     id_ += 1
-                     yield id_, example_dict
-
-
- if __name__ == "__main__":
-     dataset = datasets.load_dataset(__file__)
-     dataset.push_to_hub("kasnerz/cacapo")
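
For reference, the script ends by pushing the processed data to the Hub, which is presumably why the local loader could be deleted. A minimal sketch of loading the pushed copy directly, assuming the kasnerz/cacapo repository retains the train/validation/test splits and feature schema defined in _info() above:

from datasets import load_dataset

# Minimal sketch, assuming the pushed "kasnerz/cacapo" repository keeps the
# train/validation/test splits produced by the deleted loader script.
dataset = load_dataset("kasnerz/cacapo")

example = dataset["train"][0]
# Each example should carry: category, lang, size, eid, original_triple_sets,
# modified_triple_sets, and lex (parallel "comment"/"lid"/"text" lists).
print(example["lang"], example["category"], example["lex"]["text"])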