Dennis Aumiller committed on
Commit e548574
1 Parent(s): 1065197

Adding custom data loader based on the XLSum script.

Files changed (1):
  1. eur-lex-sum.py +152 -0
eur-lex-sum.py ADDED
@@ -0,0 +1,152 @@
"""
Data loader script for the EUR-Lex-Sum summarization dataset by Aumiller, Chouhan and Gertz.
The script itself was adapted from the XLSum data loader.
"""
import os
import json

import datasets
from datasets.tasks import Summarization


logger = datasets.logging.get_logger(__name__)


_CITATION = """\
@article{aumiller-etal-2022-eur,
  author     = {Aumiller, Dennis and Chouhan, Ashish and Gertz, Michael},
  title      = {{EUR-Lex-Sum: A Multi- and Cross-lingual Dataset for Long-form Summarization in the Legal Domain}},
  journal    = {CoRR},
  volume     = {abs/2210.13448},
  eprinttype = {arXiv},
  eprint     = {2210.13448},
  url        = {https://arxiv.org/abs/2210.13448}
}
"""

_HOMEPAGE = "https://github.com/achouhan93/eur-lex-sum"

_LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License (CC BY-NC-SA 4.0)"

_DESCRIPTION = """\
The EUR-Lex-Sum dataset is a multilingual resource intended for text summarization in the legal domain.
It is based on human-written summaries of legal acts issued by the European Union.
It distinguishes itself by introducing a smaller set of high-quality human-written samples,
each of which has much longer references (and summaries!) than comparable datasets.
Additionally, the underlying legal acts provide a challenging domain-specific application to legal texts,
which are so far underrepresented in non-English languages.
For each legal act, the sample can be available in up to 24 languages
(the officially recognized languages in the European Union);
the validation and test samples consist entirely of samples available in all languages,
and are aligned across all languages at the paragraph level.
"""

# The 24 officially recognized languages of the European Union.
_LANGUAGES = [
    "bulgarian",
    "czech",
    "dutch",
    "estonian",
    "french",
    "greek",
    "irish",
    "latvian",
    "maltese",
    "portuguese",
    "slovak",
    "spanish",
    "croatian",
    "danish",
    "english",
    "finnish",
    "german",
    "hungarian",
    "italian",
    "lithuanian",
    "polish",
    "romanian",
    "slovenian",
    "swedish",
]

# "resolve" (raw file access) is required for downloads; "tree" only serves an HTML listing.
_URL = "https://huggingface.co/datasets/dennlinger/eur-lex-sum/resolve/main/data/{}/"
_URLS = {
    "train": _URL + "train.json",
    "validation": _URL + "validation.json",
    "test": _URL + "test.json",
}


class EurLexSumConfig(datasets.BuilderConfig):
    """BuilderConfig for EUR-Lex-Sum."""

    def __init__(self, **kwargs):
        """BuilderConfig for EUR-Lex-Sum.
        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(EurLexSumConfig, self).__init__(**kwargs)


class EurLexSum(datasets.GeneratorBasedBuilder):
    # The builder must derive from GeneratorBasedBuilder; Summarization is a task
    # template, not a builder class.
    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIG_CLASS = EurLexSumConfig
    BUILDER_CONFIGS = [
        EurLexSumConfig(name=lang, version=datasets.Version("1.0.0"))
        for lang in _LANGUAGES
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "celex_id": datasets.Value("string"),
                    "reference": datasets.Value("string"),
                    "summary": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
            task_templates=[
                Summarization(task="summarization", text_column="reference", summary_column="summary")
            ],
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        lang = str(self.config.name)
        # Download the three per-language split files directly; the repo is assumed
        # to store them as data/<language>/{train,validation,test}.json.
        downloaded_files = dl_manager.download_and_extract(
            {split: url.format(lang) for split, url in _URLS.items()}
        )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files["validation"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files["test"]},
            ),
        ]

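    # Each split file is expected to be in JSON-lines format (one record per line).
    # An illustrative, hypothetical record with the fields declared in _info():
    #   {"celex_id": "32014R0001", "reference": "<full text of the legal act>",
    #    "summary": "<human-written summary of the act>"}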
    def _generate_examples(self, filepath):
        """Yields examples as (key, example) tuples."""
        with open(filepath, encoding="utf-8") as f:
            for idx_, row in enumerate(f):
                data = json.loads(row)
                yield idx_, data
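
A minimal usage sketch for the finished loader (assuming the script is hosted in the dennlinger/eur-lex-sum dataset repository referenced by _URL, and using "english" as an example configuration):

    from datasets import load_dataset

    # Pick one of the 24 language configurations defined in _LANGUAGES.
    dataset = load_dataset("dennlinger/eur-lex-sum", "english")

    # Each record carries a CELEX identifier, the full reference text, and its summary.
    sample = dataset["validation"][0]
    print(sample["celex_id"], len(sample["reference"]), len(sample["summary"]))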