# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A dataset loading script for the QA-Discourse dataset (Pyatkin et al., EMNLP 2020)."""


from pathlib import Path
from typing import List

import pandas as pd

import datasets


_CITATION = """\
@inproceedings{pyatkin2020qadiscourse,
    title={QADiscourse - Discourse Relations as QA Pairs: Representation, Crowdsourcing and Baselines},
    author={Pyatkin, Valentina and Klein, Ayal and Tsarfaty, Reut and Dagan, Ido},
    booktitle={Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)},
    pages={2804--2819},
    year={2020}
}"""

_DESCRIPTION = """\
The dataset contains question-answer pairs to model discourse relations.
While answers roughly correspond to spans of the sentence, annotators were free to adjust
these spans so that they fit the question grammatically; therefore, answers are given
as plain text rather than as identified spans of the original sentence.
See the paper for details:
QADiscourse - Discourse Relations as QA Pairs: Representation, Crowdsourcing and Baselines, Pyatkin et al., 2020
"""

_HOMEPAGE = "https://github.com/ValentinaPy/QADiscourse"

_LICENSE = """Resources on this page are licensed CC-BY 4.0, a Creative Commons license requiring Attribution (https://creativecommons.org/licenses/by/4.0/)."""

_URLs = {
    "wikinews.train": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikinews_train.tsv",
    "wikinews.dev": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikinews_dev.tsv",
    "wikinews.test": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikinews_test.tsv",
    "wikipedia.train": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikipedia_train.tsv",
    "wikipedia.dev": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikipedia_dev.tsv",
    "wikipedia.test": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikipedia_test.tsv",
}

# Columns of the source TSV files; note that the last two column names contain
# spaces in the original data.
COLUMNS = ['qasrl_id', 'sentence', 'worker_id', 'full_question', 'full_answer',
           'question_start', 'question_aux', 'question_body', 'answer',
           'untokenized sentence', 'target indices for untok sent']


class QaDiscourse(datasets.GeneratorBasedBuilder):
    """QA-Discourse: Discourse Relations as Question-Answer Pairs."""

    VERSION = datasets.Version("1.0.2")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="plain_text", version=VERSION, description="This provides the QA-Discourse dataset"
        ),
    ]

    # It's not mandatory to have a default configuration; use one only if it makes sense.
    DEFAULT_CONFIG_NAME = "plain_text"
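    # Usage sketch (illustrative, not part of the builder itself): once this script
    # is available locally, it can typically be loaded through the standard
    # `datasets` API. The path below is a hypothetical local path, not an official
    # dataset identifier on the Hub.
    #
    #     from datasets import load_dataset
    #
    #     qa_discourse = load_dataset("path/to/qa_discourse.py")
    #     print(qa_discourse["train"][0])
    #     # -> {'sentence': ..., 'sent_id': ..., 'question': [...], 'answers': [...]}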
    def _info(self):
        features = datasets.Features(
            {
                "sentence": datasets.Value("string"),
                "sent_id": datasets.Value("string"),
                "question": datasets.Sequence(datasets.Value("string")),
                "answers": datasets.Sequence(datasets.Value("string")),
            }
        )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the columns of the dataset and their types.
            features=features,
            # If there were a common (input, target) tuple among the features, it would
            # be specified here; it is used when as_supervised=True in builder.as_dataset.
            supervised_keys=None,
            # Homepage of the dataset for documentation.
            homepage=_HOMEPAGE,
            # License for the dataset, if available.
            license=_LICENSE,
            # Citation for the dataset.
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Returns SplitGenerators."""
        # Download and prepare all files, keeping the same structure as _URLs.
        corpora = {
            section: Path(dl_manager.download_and_extract(_URLs[section]))
            for section in _URLs
        }

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples.
                gen_kwargs={
                    "filepaths": [corpora["wikinews.train"], corpora["wikipedia.train"]],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepaths": [corpora["wikinews.dev"], corpora["wikipedia.dev"]],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepaths": [corpora["wikinews.test"], corpora["wikipedia.test"]],
                },
            ),
        ]

    def _generate_examples(self, filepaths: List[str]):
        """Yields QA-Discourse examples from TSV files.

        Sentences with no QAs yield an "empty QA" record, in which both
        'question' and 'answers' are empty lists.
        """
        # Merge annotations from all sections. Note: `error_bad_lines=False` was
        # removed in pandas 2.0; `on_bad_lines="skip"` (pandas >= 1.3) is the
        # equivalent replacement.
        df = pd.concat(
            [pd.read_csv(fn, sep="\t", on_bad_lines="skip") for fn in filepaths]
        ).reset_index(drop=True)
        # Cast all values to strings explicitly to avoid type errors.
        df = df.applymap(str)

        for counter, row in df.iterrows():
            # Prepare the question: three "slots" plus a question mark.
            question = [row.question_start, row.question_aux, row.question_body.rstrip("?"), "?"]
            answer = [row.answer]
            if row.question_start == "_":  # sentence has no QAs
                question = []
                answer = []

            yield counter, {
                "sentence": row.sentence,
                "sent_id": row.qasrl_id,
                "question": question,
                "answers": answer,
            }
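
# A minimal, self-contained sketch (assumption: this file is run directly as a
# plain script, outside the `datasets` loading machinery) of how the three
# question "slots" from the TSV are assembled, mirroring _generate_examples.
# The rows below are hypothetical and only shaped like the real TSV columns.
if __name__ == "__main__":
    demo = pd.DataFrame(
        [
            {"qasrl_id": "demo-1", "sentence": "It rained, so the match was cancelled.",
             "question_start": "What is the result of", "question_aux": "",
             "question_body": "it raining?", "answer": "the match was cancelled"},
            {"qasrl_id": "demo-2", "sentence": "A sentence with no QAs.",
             "question_start": "_", "question_aux": "_",
             "question_body": "_", "answer": "_"},
        ]
    ).applymap(str)

    for _, row in demo.iterrows():
        if row.question_start == "_":  # no QAs annotated for this sentence
            print(row.qasrl_id, "-> (empty QA)")
        else:
            slots = [row.question_start, row.question_aux, row.question_body.rstrip("?"), "?"]
            print(row.qasrl_id, "->", " ".join(s for s in slots if s))
    # Expected output:
    #   demo-1 -> What is the result of it raining ?
    #   demo-2 -> (empty QA)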