Datasets: ibm/duorc
Languages: English
Multilinguality: monolingual
Language Creators: crowdsourced
Annotations Creators: crowdsourced
Source Datasets: original
albertvillanova committed
Commit: b9a5030
1 Parent(s): 9a65b20

Delete loading script

Files changed (1):
  1. duorc.py +0 -146
duorc.py DELETED
@@ -1,146 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""DuoRC: A Paraphrased
-Reading Comprehension Question Answering Dataset"""
-
-
-import json
-
-import datasets
-
-
-_CITATION = """\
-@inproceedings{DuoRC,
-  author = { Amrita Saha and Rahul Aralikatte and Mitesh M. Khapra and Karthik Sankaranarayanan},\
-  title = {{DuoRC: Towards Complex Language Understanding with Paraphrased Reading Comprehension}},
-  booktitle = {Meeting of the Association for Computational Linguistics (ACL)},
-  year = {2018}
-}
-"""
-
-
-_DESCRIPTION = """\
-DuoRC contains 186,089 unique question-answer pairs created from a collection of 7680 pairs of movie plots where each pair in the collection reflects two versions of the same movie.
-"""
-
-_HOMEPAGE = "https://duorc.github.io/"
-
-_LICENSE = "https://raw.githubusercontent.com/duorc/duorc/master/LICENSE"
-
-_URL = "https://raw.githubusercontent.com/duorc/duorc/master/dataset/"
-_URLs = {
-    "SelfRC": {
-        "train": _URL + "SelfRC_train.json",
-        "dev": _URL + "SelfRC_dev.json",
-        "test": _URL + "SelfRC_test.json",
-    },
-    "ParaphraseRC": {
-        "train": _URL + "ParaphraseRC_train.json",
-        "dev": _URL + "ParaphraseRC_dev.json",
-        "test": _URL + "ParaphraseRC_test.json",
-    },
-}
-
-
-class DuorcConfig(datasets.BuilderConfig):
-    """BuilderConfig for DuoRC SelfRC."""
-
-    def __init__(self, **kwargs):
-        """BuilderConfig for DuoRC SelfRC.
-        Args:
-          **kwargs: keyword arguments forwarded to super.
-        """
-        super(DuorcConfig, self).__init__(**kwargs)
-
-
-class Duorc(datasets.GeneratorBasedBuilder):
-    """DuoRC Dataset"""
-
-    VERSION = datasets.Version("1.0.0")
-    BUILDER_CONFIGS = [
-        DuorcConfig(name="SelfRC", version=VERSION, description="SelfRC dataset"),
-        DuorcConfig(name="ParaphraseRC", version=VERSION, description="ParaphraseRC dataset"),
-    ]
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # This defines the different columns of the dataset and their types
-            features=datasets.Features(
-                {
-                    "plot_id": datasets.Value("string"),
-                    "plot": datasets.Value("string"),
-                    "title": datasets.Value("string"),
-                    "question_id": datasets.Value("string"),
-                    "question": datasets.Value("string"),
-                    "answers": datasets.features.Sequence(datasets.Value("string")),
-                    "no_answer": datasets.Value("bool"),
-                }
-            ),
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        my_urls = _URLs[self.config.name]
-        downloaded_files = dl_manager.download_and_extract(my_urls)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "filepath": downloaded_files["train"],
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={
-                    "filepath": downloaded_files["dev"],
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={
-                    "filepath": downloaded_files["test"],
-                },
-            ),
-        ]
-
-    def _generate_examples(self, filepath):
-        """This function returns the examples in the raw (text) form."""
-        with open(filepath, encoding="utf-8") as f:
-            duorc = json.load(f)
-            for example in duorc:
-                plot_id = example["id"]
-                plot = example["plot"].strip()
-                title = example["title"].strip()
-                for qas in example["qa"]:
-                    question_id = qas["id"]
-                    question = qas["question"].strip()
-                    answers = [answer.strip() for answer in qas["answers"]]
-                    no_answer = qas["no_answer"]
-
-                    yield question_id, {
-                        "title": title,
-                        "plot": plot,
-                        "question": question,
-                        "plot_id": plot_id,
-                        "question_id": question_id,
-                        "answers": answers,
-                        "no_answer": no_answer,
-                    }
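
With the loading script removed, the dataset is typically served straight from the data files hosted in the repository, so no custom code is needed to use it. Below is a minimal usage sketch, assuming the repository id ibm/duorc from the page header and the two configurations ("SelfRC", "ParaphraseRC") declared in the deleted script; it is not part of this commit.

from datasets import load_dataset

# "SelfRC" and "ParaphraseRC" are the two builder configs the deleted script defined.
self_rc = load_dataset("ibm/duorc", "SelfRC")
paraphrase_rc = load_dataset("ibm/duorc", "ParaphraseRC")

# Splits follow the deleted _split_generators: train, validation, test.
example = self_rc["train"][0]

# Columns mirror the features the deleted builder declared:
# plot_id, plot, title, question_id, question, answers, no_answer.
print(example["question"], example["answers"], example["no_answer"])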
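
For reference, the raw JSON layout that the deleted _generate_examples expected can still be read directly from the upstream DuoRC GitHub repository. The sketch below reproduces that flattening logic outside the builder; the file URL and field names are taken from the deleted script, while the helper name iter_duorc is illustrative only.

import json
import urllib.request

# Raw file URL as built from _URL/_URLs in the deleted script.
SELF_RC_DEV = "https://raw.githubusercontent.com/duorc/duorc/master/dataset/SelfRC_dev.json"

def iter_duorc(url):
    # Each top-level entry is one movie plot with its title and a list of QA pairs.
    with urllib.request.urlopen(url) as f:
        plots = json.load(f)
    for example in plots:
        for qas in example["qa"]:
            # Flatten to one record per question, matching the builder's features.
            yield {
                "plot_id": example["id"],
                "plot": example["plot"].strip(),
                "title": example["title"].strip(),
                "question_id": qas["id"],
                "question": qas["question"].strip(),
                "answers": [a.strip() for a in qas["answers"]],
                "no_answer": qas["no_answer"],
            }

first = next(iter_duorc(SELF_RC_DEV))
print(first["title"], first["question"])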