# coding=utf-8
# Lint as: python3
"""GermanDPR: A German-Language Dataset for Training Dense Passage Retrievers."""

import json

import datasets


logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@misc{möller2021germanquad,
      title={GermanQuAD and GermanDPR: Improving Non-English Question Answering and Passage Retrieval},
      author={Timo Möller and Julian Risch and Malte Pietsch},
      year={2021},
      eprint={2104.12741},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
"""

_DESCRIPTION = """\
We take GermanQuAD as a starting point and add hard negatives from a dump of the full German Wikipedia, following the approach of the DPR authors (Karpukhin et al., 2020). The format of the dataset also resembles the one of DPR. GermanDPR comprises 9275 question/answer pairs in the training set and 1025 pairs in the test set. For each pair, there are one positive context and three hard negative contexts.
"""

_URL = "https://germanquad.s3.amazonaws.com/GermanDPR.zip"


class GermanDPRConfig(datasets.BuilderConfig):
    """BuilderConfig for GermanDPR."""

    def __init__(self, **kwargs):
        """BuilderConfig for GermanDPR.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(GermanDPRConfig, self).__init__(**kwargs)


class GermanDPR(datasets.GeneratorBasedBuilder):
    """GermanDPR: A German-Language Dataset for Training Dense Passage Retrievers."""

    BUILDER_CONFIGS = [
        GermanDPRConfig(
            name="plain_text",
            version=datasets.Version("1.0.0", ""),
            description="Plain text",
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "question": datasets.Value("string"),
                    "answers": datasets.features.Sequence(datasets.Value("string")),
                    "positive_ctxs": datasets.features.Sequence(
                        {
                            "title": datasets.Value("string"),
                            "text": datasets.Value("string"),
                            "passage_id": datasets.Value("string"),
                        }
                    ),
                    "negative_ctxs": datasets.features.Sequence(
                        {
                            "title": datasets.Value("string"),
                            "text": datasets.Value("string"),
                            "passage_id": datasets.Value("string"),
                        }
                    ),
                    "hard_negative_ctxs": datasets.features.Sequence(
                        {
                            "title": datasets.Value("string"),
                            "text": datasets.Value("string"),
                            "passage_id": datasets.Value("string"),
                        }
                    ),
                }
            ),
            # No default supervised_keys (as we have to pass both question
            # and context as input).
            supervised_keys=None,
            homepage="https://deepset.ai/germanquad",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        dl_dir = dl_manager.download_and_extract(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={"filepath": dl_dir + "/GermanDPR/GermanDPR_train.json"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={"filepath": dl_dir + "/GermanDPR/GermanDPR_test.json"},
            ),
        ]

    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        logger.info("generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            germandpr = json.load(f)
            for idx, qa in enumerate(germandpr):
                # GeneratorBasedBuilder expects (key, example) pairs; use the
                # record's position in the file as its unique key.
                yield idx, qa
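
# Usage sketch (assumptions: this script is saved locally as ``germandpr.py``;
# recent ``datasets`` releases may additionally require ``trust_remote_code=True``
# for script-based datasets, or the Hub copy ``deepset/germandpr`` can be used
# instead):
#
#     from datasets import load_dataset
#
#     dataset = load_dataset("./germandpr.py", "plain_text")
#     example = dataset["train"][0]
#     print(example["question"])
#     # A Sequence of dicts is returned by ``datasets`` as a dict of lists:
#     print(example["positive_ctxs"]["text"])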