"""
Data loader script for the EUR-Lex-Sum summarization dataset by Aumiller, Chouhan, and Gertz.
The script itself was adapted from the XLSum data loader.
"""
import json

import datasets
from datasets.tasks import Summarization


logger = datasets.logging.get_logger(__name__)


_CITATION = """\
@article{aumiller-etal-2022-eur,
author = {Aumiller, Dennis and Chouhan, Ashish and Gertz, Michael},
title = {{EUR-Lex-Sum: A Multi- and Cross-lingual Dataset for Long-form Summarization in the Legal Domain}},
journal = {CoRR},
volume = {abs/2210.13448},
eprinttype = {arXiv},
eprint = {2210.13448},
url = {https://arxiv.org/abs/2210.13448}
}
"""

_HOMEPAGE = "https://github.com/achouhan93/eur-lex-sum"

_LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License (CC BY-NC-SA 4.0)"

_DESCRIPTION = """\
The EUR-Lex-Sum dataset is a multilingual resource intended for text summarization in the legal domain.
It is based on human-written summaries of legal acts issued by the European Union.
It distinguishes itself by introducing a smaller set of high-quality, human-written samples,
each of which has much longer references (and summaries!) than comparable datasets.
Additionally, the underlying legal acts provide a challenging domain-specific application to legal texts,
which are so far underrepresented in non-English languages.
Each legal act can be available in up to 24 languages
(the officially recognized languages in the European Union);
the validation and test splits consist entirely of samples available in all languages,
and are aligned across all languages at the paragraph level.
"""

# The 24 official languages of the European Union, in alphabetical order.
_LANGUAGES = [
    "bulgarian",
    "croatian",
    "czech",
    "danish",
    "dutch",
    "english",
    "estonian",
    "finnish",
    "french",
    "german",
    "greek",
    "hungarian",
    "irish",
    "italian",
    "latvian",
    "lithuanian",
    "maltese",
    "polish",
    "portuguese",
    "romanian",
    "slovak",
    "slovenian",
    "spanish",
    "swedish"
]

_URL = "https://huggingface.co/datasets/dennlinger/eur-lex-sum/resolve/main/data/"
_URLS = {
    "train": _URL + "{}/train.json",
    "validation": _URL + "{}/validation.json",
    "test": _URL + "{}/test.json",
}
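# For illustration, formatting a split template with a language name yields the
# per-config file location, e.g.
#   _URLS["validation"].format("german")
#   -> "https://huggingface.co/datasets/dennlinger/eur-lex-sum/resolve/main/data/german/validation.json"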


class EurLexSumConfig(datasets.BuilderConfig):
    """BuilderConfig for EUR-Lex-Sum."""

    def __init__(self, **kwargs):
        """BuilderConfig for EUR-Lex-Sum.
        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


class EurLexSum(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        EurLexSumConfig(
            name=lang,
            version=datasets.Version("1.0.0")
        )
        for lang in _LANGUAGES
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "celex_id": datasets.Value("string"),
                    "reference": datasets.Value("string"),
                    "summary": datasets.Value("string")
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
            task_templates=[
                Summarization(task="summarization", text_column="reference", summary_column="summary")
            ],
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        lang = str(self.config.name)

        # Add language tag for each split
        urls = {k: url.format(lang) for k, url in _URLS.items()}
        logger.info("Downloading EUR-Lex-Sum split files: %s", urls)
        data_dir = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_dir["train"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": data_dir["validation"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": data_dir["test"],
                },
            ),
        ]

    def _generate_examples(self, filepath):
        """Yields examples as (key, example) tuples."""
        with open(filepath, encoding="utf-8") as f:
            for idx, row in enumerate(f):
                data = json.loads(row)
                yield idx, data
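
# Local smoke test (a sketch; assumes this file is saved as "eur_lex_sum.py"
# and that network access to the Hub is available):
#
#     from datasets import load_dataset
#     dataset = load_dataset("./eur_lex_sum.py", "english")
#     print(dataset)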