Datasets: projecte-aina/vilaquad

Sub-tasks: extractive-qa
Languages: Catalan
Multilinguality: monolingual
Size Categories: 1K<n<10K
Language Creators: found
Annotations Creators: expert-generated
Source Datasets: original
ArXiv:
License:
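The loading script below registers train, validation, and test splits for this corpus. A minimal usage sketch, assuming the repository ID projecte-aina/vilaquad taken from the _URL inside the script (recent versions of the datasets library may additionally require trust_remote_code=True for script-based datasets):

from datasets import load_dataset

# Load all splits through the loading script hosted in the repository.
# Depending on the installed datasets version, trust_remote_code=True
# may be needed for datasets that ship a loading script.
vilaquad = load_dataset("projecte-aina/vilaquad")

print(vilaquad)              # DatasetDict with train / validation / test splits
print(vilaquad["train"][0])  # id, title, context, question, answers
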
# Loading script for the VilaQuAD dataset.

import json

import datasets

logger = datasets.logging.get_logger(__name__)

_CITATION = """\
Rodriguez-Penagos, Carlos Gerardo, & Armentano-Oller, Carme. (2021).
VilaQuAD: an extractive QA dataset for catalan, from Vilaweb newswire text
[Data set]. Zenodo. https://doi.org/10.5281/zenodo.4562337
"""

_DESCRIPTION = """\
This dataset contains 2095 Catalan-language news articles, each paired with 1 to 5 questions referring to its fragment (or context).

VilaQuAD articles are extracted from the Vilaweb daily newspaper (www.vilaweb.cat) and are used under the CC BY-NC-ND licence (https://creativecommons.org/licenses/by-nc-nd/3.0/deed.ca).

This dataset can be used to build extractive-QA systems and language models.

Funded by the Generalitat de Catalunya, Departament de Polítiques Digitals i Administració Pública (AINA), MT4ALL and the Plan de Impulso de las Tecnologías del Lenguaje (Plan TL).
"""

_HOMEPAGE = "https://doi.org/10.5281/zenodo.4562337"

_URL = "https://huggingface.co/datasets/projecte-aina/vilaquad/resolve/main/"
_TRAINING_FILE = "train.json"
_DEV_FILE = "dev.json"
_TEST_FILE = "test.json"


class VilaQuAD(datasets.GeneratorBasedBuilder):
    """VilaQuAD Dataset."""

    VERSION = datasets.Version("1.0.1")

    def _info(self):

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "context": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "answers": [
                        {
                            "text": datasets.Value("string"),
                            "answer_start": datasets.Value("int32"),
                        }
                    ],
                }
            ),
            # No default supervised_keys (as we have to pass both question
            # and context as input).
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        urls_to_download = {
            "train": f"{_URL}{_TRAINING_FILE}",
            "dev": f"{_URL}{_DEV_FILE}",
            "test": f"{_URL}{_TEST_FILE}",
        }
        downloaded_files = dl_manager.download(urls_to_download)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        logger.info("generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            vilaquad = json.load(f)
            for article in vilaquad["data"]:
                title = article.get("title", "").strip()
                for paragraph in article["paragraphs"]:
                    context = paragraph["context"].strip()
                    for qa in paragraph["qas"]:
                        question = qa["question"].strip()
                        id_ = qa["id"]
                        # Only the first answer per question is kept below; the commented
                        # lines show how all answers could be collected in the future.
                        # answer_starts = [answer["answer_start"] for answer in qa["answers"]]
                        # answers = [answer["text"].strip() for answer in qa["answers"]]
                        text = qa["answers"][0]["text"]
                        answer_start = qa["answers"][0]["answer_start"]

                        yield id_, {
                            "title": title,
                            "context": context,
                            "question": question,
                            "id": id_,
                            "answers": [{"text": text, "answer_start": answer_start}],
                        }
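
For reference, _generate_examples above walks a SQuAD-style JSON layout (data -> paragraphs -> qas -> answers), keeping one answer per question. A standalone sketch of the same traversal over a locally downloaded train.json (the local filename is illustrative), which can be handy for inspecting the raw files without the datasets library:

import json

# Illustrative path: download train.json from the _URL defined above first.
with open("train.json", encoding="utf-8") as f:
    raw = json.load(f)

for article in raw["data"][:1]:  # first article only, to keep the output short
    for paragraph in article["paragraphs"]:
        context = paragraph["context"]
        for qa in paragraph["qas"]:
            answer = qa["answers"][0]
            start = answer["answer_start"]  # character offset into the context
            print(qa["id"], "|", qa["question"])
            print("  ->", context[start:start + len(answer["text"])])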