import json
import os
import pickle

import datasets
from wikidata.client import Client

# Module-level Wikidata API client; used by WikidataRuBQ.get_name to fetch entity labels.
client = Client()

_DESCRIPTION = """\
HuggingFace wrapper for the RuBQ dataset (https://github.com/vladislavneon/RuBQ).
"""

_HOMEPAGE = "https://zenodo.org/record/4345697#.Y01k81JBy3I"


_LICENSE = "Attribution-ShareAlike 4.0 International"

_LANGS = ["ru", "en"]


_URLS = {
    "test": "https://raw.githubusercontent.com/vladislavneon/RuBQ/master/RuBQ_2.0/RuBQ_2.0_test.json",
    "dev": "https://raw.githubusercontent.com/vladislavneon/RuBQ/master/RuBQ_2.0/RuBQ_2.0_dev.json",
}
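
# RuBQ 2.0 publishes only dev and test splits; _split_generators below derives
# both the train and validation splits from the dev file.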


_DATA_DIRECTORY = "."
VERSION = datasets.Version("0.0.1")


class WikidataRuBQConfig(datasets.BuilderConfig):
    """BuilderConfig for WikidataRuBQ."""

    def __init__(self, **kwargs):
        """BuilderConfig for WikidataRuBQ.
        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


class WikidataRuBQ(datasets.GeneratorBasedBuilder):
    """HuggingFace wrapper https://github.com/vladislavneon/RuBQ/tree/master/RuBQ_2.0 dataset"""

    BUILDER_CONFIG_CLASS = WikidataRuBQConfig
    BUILDER_CONFIGS = [
        WikidataRuBQConfig(
            name=f"multiple_{ln}",
            version=VERSION,
            description=f"questions with multiple entity labels as answers ({ln})",
        )
        for ln in _LANGS
    ]

    DEFAULT_CONFIG_NAME = "multiple_en"
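
    # Example usage (the script path is hypothetical; the pickled vocab files
    # must sit next to this script):
    #   ds = datasets.load_dataset("path/to/wikidata_rubq.py", "multiple_en")
    #   print(ds["test"][0])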

    def _info(self):
        features = datasets.Features(
            {
                "object": datasets.Sequence(datasets.Value("string")),
                "question": datasets.Value("string")
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
        )
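
    # A generated record looks like this (illustrative values):
    #   {"object": ["Douglas Adams"], "question": "Who wrote The Hitchhiker's Guide to the Galaxy?"}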

    def _split_generators(self, dl_manager):
        # "default" falls back to the default config, multiple_en.
        if self.config.name == "default":
            version, lang = "multiple", "en"
        else:
            version, lang = self.config.name.split("_")

        if lang not in _LANGS:
            raise ValueError(f"Language {lang} not supported")

        downloaded_files = dl_manager.download_and_extract(_URLS)

        # The vocabulary files are expected to live next to this script.
        data_dir = os.path.join(self.base_path, '')
        vocab_path = os.path.join(data_dir, "reverse_vocab_wikidata_en.json")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": downloaded_files["dev"],
                    "lang": lang,
                    "vocab_path": vocab_path,
                    "split": 'train',
                    "data_dir": data_dir
                }),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": downloaded_files["dev"],
                    "lang": lang,
                    "vocab_path": vocab_path,
                    "split": 'validation',
                    "data_dir": data_dir
                }),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": downloaded_files["test"],
                    "lang": lang,
                    "vocab_path": vocab_path,
                    "split": 'test',
                    "data_dir": data_dir
                })
        ]

    def get_name(self, idd):
        '''
        Return the English label of a Wikidata entity.
        input:  (str) Wikidata id, e.g. 'Q2'
        output: (str) English label of the entity, or None if it has no English label
        '''
        entity = client.get(idd, load=True)
        try:
            return entity.data["labels"]["en"]["value"]
        except KeyError:
            return None
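
    # Illustrative: get_name("Q42") should return "Douglas Adams" (Q42's English
    # label), or None for entities without an English label.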

    def _generate_examples(self, filepath, lang, vocab_path, split, data_dir):
        # Note: vocab_path is kept for gen_kwargs compatibility but is unused here;
        # labels are resolved through the pickled direct vocabularies instead.
        if split == 'test':
            direct_path = os.path.join(data_dir, "test_direct_vocab_wikidata_en.pkl")
        else:
            direct_path = os.path.join(data_dir, "train_direct_vocab_wikidata_en.pkl")

        # Maps Wikidata ids (e.g. 'Q2') to their English labels.
        with open(direct_path, 'rb') as handle:
            direct_vocab = pickle.load(handle)

        with open(filepath, encoding="utf-8") as f:
            items = json.load(f)
            uid_slide = 0
            for item in items:
                question = item['question_text'] if lang == 'ru' else item['question_eng']

                # Keep only answers that are Wikidata entities, deduplicated.
                objects = list(set(
                    answer['value'].split('entity/')[1]
                    for answer in item['answers']
                    if '/Q' in answer['value']
                ))

                if objects:
                    if split == 'train':
                        # For training, emit one example per answer entity and shift
                        # the key by uid_slide so keys stay unique across examples.
                        for obj in objects:
                            resolved_obj = direct_vocab.get(obj)
                            if resolved_obj is not None:
                                # Capitalize the first character of the label.
                                resolved_obj = resolved_obj[0].upper() + resolved_obj[1:]

                                key = item['uid'] + uid_slide
                                uid_slide += 1

                                yield (
                                    key,
                                    {
                                        "object": [resolved_obj],
                                        "question": question,
                                    }
                                )
                    else:
                        key = item['uid'] + uid_slide
                        yield (
                            key,
                            {
                                "object": objects,
                                "question": question,
                            }
                        )