Multilinguality: multilingual
Size Categories: 10K<n<100K
Language Creators: expert-generated, found
Annotations Creators: crowdsourced
ArXiv: 2010.11856
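The loading script below defines two configurations, xor-retrieve and xor-full, each with train, validation, and test splits. A minimal sketch of loading one configuration with the datasets library (the Hub identifier "xor_tydi_qa" is an assumption; substitute the actual dataset path if it differs):

from datasets import load_dataset

# Load the XOR-Retrieve configuration; "xor_tydi_qa" is an assumed identifier.
ds = load_dataset("xor_tydi_qa", "xor-retrieve")
print(ds["train"][0])  # {'question': ..., 'lang': ..., 'answers': ...}
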
"""XOR QA: Cross-lingual Open-Retrieval Question Answering"""


import json
import textwrap

import datasets


_XOR_TYDI_QA_CITATION = """\
    @misc{asai2020xor,
      title={XOR QA: Cross-lingual Open-Retrieval Question Answering},
      author={Akari Asai and Jungo Kasai and Jonathan H. Clark and Kenton Lee and Eunsol Choi and Hannaneh Hajishirzi},
      year={2020},
      eprint={2010.11856},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
"""

_XOR_TYDI_QA_DESCRIPTION = """\
    XOR-TyDi QA brings together for the first time information-seeking questions,
    open-retrieval QA, and multilingual QA to create a multilingual open-retrieval
    QA dataset that enables cross-lingual answer retrieval. It consists of questions
    written by information-seeking native speakers in 7 typologically diverse languages
    and answer annotations that are retrieved from multilingual document collections.
    There are three sub-tasks: XOR-Retrieve, XOR-EnglishSpan, and XOR-Full.
"""

_DESCRIPTIONS = {
    "xor-retrieve": textwrap.dedent(
        """\
        XOR-Retrieve is a cross-lingual retrieval task where a question is written in the target
        language (e.g., Japanese) and a system is required to retrieve an English document that answers the question.
        """
    ),
    "xor-full": textwrap.dedent(
        """\
        XOR-Full is a cross-lingual open-retrieval QA task where a question is written in the target
        language (e.g., Japanese) and a system is required to output a short answer in the target language."""
    ),
}

_DATA_URLS = {
    "xor-retrieve": {
        "train": "https://nlp.cs.washington.edu/xorqa/XORQA_site/data/xor_train_retrieve_eng_span.jsonl",
        "dev": "https://nlp.cs.washington.edu/xorqa/XORQA_site/data/xor_dev_retrieve_eng_span_v1_1.jsonl",
        "test": "https://nlp.cs.washington.edu/xorqa/XORQA_site/data/xor_test_retrieve_eng_span_q_only_v1_1.jsonl",
    },
    "xor-full": {
        "train": "https://nlp.cs.washington.edu/xorqa/XORQA_site/data/xor_train_full.jsonl",
        "dev": "https://nlp.cs.washington.edu/xorqa/XORQA_site/data/xor_dev_full_v1_1.jsonl",
        "test": "https://nlp.cs.washington.edu/xorqa/XORQA_site/data/xor_test_full_q_only_v1_1.jsonl",
    },
}

_XOR_TYDI_QA_URL = "https://nlp.cs.washington.edu/xorqa/"


class XORTyDiConfig(datasets.BuilderConfig):
    "BuilderConfig for XOR-TyDi Dataset"

    def __init__(self, data_url, citation, url, **kwargs):
        """
        Args:

        data_url: `dictionary`, dict with url for each split of data.
        citation: `string`, citation for the dataset.
        url: `string`, url for information about the dataset.
        **kwargs: keyword arguments forwarded to super.
        """
        super(XORTyDiConfig, self).__init__(version=datasets.Version("1.1.0", ""), **kwargs)
        self.data_url = data_url
        self.citation = citation
        self.url = url


class XORTyDi(datasets.GeneratorBasedBuilder):
    """XOR-TyDi QA: cross-lingual open-retrieval question answering in seven languages."""

    BUILDER_CONFIGS = [
        XORTyDiConfig(
            name=name,
            description=_DESCRIPTIONS[name],
            data_url=_DATA_URLS[name],
            citation=_XOR_TYDI_QA_CITATION,
            url=_XOR_TYDI_QA_URL,
        )
        for name in ["xor-retrieve", "xor-full"]
    ]

    def _info(self):
        features = {}
        features["question"] = datasets.Value("string")
        features["lang"] = datasets.features.ClassLabel(names=["ar", "bn", "fi", "ja", "ko", "ru", "te"])
        features["answers"] = datasets.Value("string")

        return datasets.DatasetInfo(
            description=_XOR_TYDI_QA_DESCRIPTION + "\n" + self.config.description,
            features=datasets.Features(features),
            homepage=self.config.url,
            citation=_XOR_TYDI_QA_CITATION,
        )

    def _split_generators(self, dl_manager):
        train = dl_manager.download_and_extract(self.config.data_url["train"])
        dev = dl_manager.download_and_extract(self.config.data_url["dev"])
        test = dl_manager.download_and_extract(self.config.data_url["test"])

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train, "split": "train"}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": dev, "split": "dev"}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test, "split": "test"}),
        ]

    def _generate_examples(self, filepath, split):
        """Yields (id, example) pairs from a JSONL file, one question per line."""
        with open(filepath, encoding="utf-8") as f:
            for id_, jline in enumerate(line for line in f if line.strip()):
                row = json.loads(jline)
                # Test sets ship questions only, so no gold answers are available.
                answers = "None" if split == "test" else " ".join(row["answers"])
                yield id_, {
                    "question": row["question"],
                    "answers": answers,
                    "lang": row["lang"].strip(),
                }
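
Each line in the downloaded JSONL files is expected to carry at least the fields read above: question, lang, and, for the train and dev splits, a list of answers. A minimal sketch of what one line turns into when it is yielded (the field values here are invented for illustration):

import json

jline = '{"question": "...", "lang": "ja", "answers": ["1945"]}'  # hypothetical train/dev line
row = json.loads(jline)
example = {
    "question": row["question"],
    "answers": " ".join(row["answers"]),  # answer strings are joined into a single string field
    "lang": row["lang"].strip(),
}
# For the test splits, which contain questions only, "answers" is set to the literal string "None".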