# coding=utf-8
# Copyright 2020 HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
import json

import datasets

_DESCRIPTION = """MQA is a multilingual corpus of questions and answers parsed from the Common Crawl. Questions are divided between Frequently Asked Questions (FAQ) pages and Community Question Answering (CQA) pages."""
_HOMEPAGE_URL = "https://huggingface.co/datasets/clips/mqa"
_CITATION = """
@inproceedings{debruyn2021mfaq,
      title={MFAQ: a Multilingual FAQ Dataset},
      author={Maxime {De Bruyn} and Ehsan Lotfi and Jeska Buhmann and Walter Daelemans},
      year={2021},
      booktitle={MRQA@EMNLP2021},
}
"""
_VERSION = "0.4"
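# Relative paths within the dataset repository; "{}" is filled with a
# language code. The ".neg" files additionally include negative examples
# for each question.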
_TRAIN_BASE_URL = "data/{}.json"
_TRAIN_NEG_URL = "data/{}.neg.json"
_LANGUAGES = [
    "en", "de", "es", "fr",
    "ru", "ja", "it", "zh", "pt",
    "nl", "tr", "pl", "vi", "ar",
    "id", "uk", "ro", "no", 
    "sv", "el", "fi", "da",
    "cs", "ko", "fa", "hi", "hu",
    "sk", "lt", "et", "hr", "is",
    "lv", "ms", "bg", "sr", "ca"
]

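# One configuration per (language, negatives) pair; the generated config
# name is "<language>-<negatives>", e.g. "en-True". The special language
# "all" covers every entry in _LANGUAGES.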
class MFAQLightConfig(datasets.BuilderConfig):
    def __init__(self, *args, language="en", negatives=True, **kwargs):
        super().__init__(
            *args,
            name=f"{language}-{negatives}",
            **kwargs,
        )
        self.language = language
        self.negatives = negatives
    
class MFAQLight(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = []
    for language in _LANGUAGES:
        BUILDER_CONFIGS.append(MFAQLightConfig(language=language, negatives=True))
        BUILDER_CONFIGS.append(MFAQLightConfig(language=language, negatives=False))
    BUILDER_CONFIGS.append(MFAQLightConfig(language="all", negatives=True))
    BUILDER_CONFIGS.append(MFAQLightConfig(language="all", negatives=False))
    BUILDER_CONFIG_CLASS = MFAQLightConfig
    
    def _info(self):
        features = {
            "question": datasets.Value("string"),
            "answer": datasets.Value("string"),
            "id": datasets.Value("string"),
            "negatives": [datasets.Value("string")]
        }
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            supervised_keys=None,
            homepage=_HOMEPAGE_URL,
            citation=_CITATION,
        )
    
    def _split_generators(self, dl_manager):
        train_filenames = []
        languages = _LANGUAGES if self.config.language == "all" else [self.config.language]
        base_path = _TRAIN_NEG_URL if self.config.negatives else _TRAIN_BASE_URL
        for language in languages:
            path = dl_manager.download_and_extract(base_path.format(language))
            train_filenames.append(path)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filenames": train_filenames},
            )
        ]

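    # Each data file is JSON Lines: one record per line, e.g. (illustrative;
    # the "negative" key is absent in files without negatives):
    #     {"id": "...", "question": "...", "answer": "...", "negative": ["..."]}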
    def _generate_examples(self, filenames):
        for filename in filenames:
            with open(filename, "r", encoding="utf-8") as f:
                for line in f:
                    question = json.loads(line)
                    yield question["id"], {
                        "question": question["question"],
                        "answer": question["answer"],
                        "id": question["id"],
                        "negatives": question.get("negative", []),
                    }
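

if __name__ == "__main__":
    # Usage sketch, not part of the loading logic: assumes this file is used
    # as a local dataset script with the data/ files resolvable by the
    # download manager (newer `datasets` releases may also require
    # trust_remote_code=True).
    from datasets import load_dataset

    # Keyword arguments are forwarded to MFAQLightConfig; this is equivalent
    # to selecting the generated config name "de-False".
    dataset = load_dataset(__file__, language="de", negatives=False)
    print(next(iter(dataset["train"])))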