#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import io
import json
import os

import datasets
from datasets.tasks import QuestionAnsweringExtractive

logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{keren2021parashoot,
    title={ParaShoot: A Hebrew Question Answering Dataset},
    author={Keren, Omri and Levy, Omer},
    booktitle={Proceedings of the 3rd Workshop on Machine Reading for Question Answering},
    pages={106--112},
    year={2021}
}
"""

_DESCRIPTION = """\
A Hebrew question answering dataset in the style of SQuAD, based on articles scraped from
Wikipedia. The dataset contains a few thousand crowdsourced question-answer pairs, in a
setting suitable for few-shot learning.
"""

_URLS = {
    "train": "data/train.tar.gz",
    "validation": "data/dev.tar.gz",
    "test": "data/test.tar.gz",
}


class ParashootConfig(datasets.BuilderConfig):
    """BuilderConfig for Parashoot."""

    def __init__(self, **kwargs):
        """BuilderConfig for Parashoot.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(ParashootConfig, self).__init__(**kwargs)


class Parashoot(datasets.GeneratorBasedBuilder):
    """Parashoot: The Hebrew Question Answering Dataset. Version 1.1."""

    BUILDER_CONFIGS = [
        ParashootConfig(
            version=datasets.Version("1.1.0", ""),
            description=_DESCRIPTION,
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "context": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "answers": datasets.features.Sequence(
                        {
                            "text": datasets.Value("string"),
                            "answer_start": datasets.Value("int32"),
                        }
                    ),
                }
            ),
            # No default supervised_keys (as we have to pass both question
            # and context as input).
            supervised_keys=None,
            homepage="https://github.com/omrikeren/ParaShoot",
            citation=_CITATION,
            task_templates=[
                QuestionAnsweringExtractive(
                    question_column="question",
                    context_column="context",
                    answers_column="answers",
                )
            ],
        )

    def _split_generators(self, dl_manager):
        downloaded_files = dl_manager.download(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": dl_manager.iter_archive(downloaded_files["train"]),
                    "basename": "train.jsonl",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": dl_manager.iter_archive(downloaded_files["validation"]),
                    "basename": "dev.jsonl",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": dl_manager.iter_archive(downloaded_files["test"]),
                    "basename": "test.jsonl",
                },
            ),
        ]

    def _generate_examples(self, filepath, basename):
        """Yields the examples in raw (text) form, one QA pair per key."""
        logger.info("generating examples from = %s", filepath)
        key = 0
        for file_path, file_obj in filepath:
            # Only parse the JSONL member matching this split; tar archives
            # can contain other members (e.g. directory entries), which would
            # otherwise be fed to json.loads and crash the generator.
            if os.path.basename(file_path) != basename:
                continue
            with io.BytesIO(file_obj.read()) as f:
                for line in f:
                    article = json.loads(line)
                    title = article.get("title", "")
                    context = article["context"]
                    answer_starts = article["answers"]["answer_start"]
                    answers = article["answers"]["text"]
                    yield key, {
                        "title": title,
                        "context": context,
                        "question": article["question"],
                        "id": article["id"],
                        "answers": {
                            "answer_start": answer_starts,
                            "text": answers,
                        },
                    }
                    key += 1
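

if __name__ == "__main__":
    # Usage sketch, not part of the original loader: exercise this script
    # locally via `datasets.load_dataset`, which accepts a path to a loading
    # script in the script-based `datasets` versions this file targets. It
    # assumes the file sits next to the data/ archives referenced in _URLS;
    # that local layout is an assumption for illustration. The __main__ guard
    # keeps this block from running when `datasets` imports the script.
    ds = datasets.load_dataset(os.path.abspath(__file__))
    print(ds)  # DatasetDict with "train", "validation", and "test" splits
    print(ds["train"][0])  # one example: id, title, context, question, answers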