parsinlu_entailment / parsinlu_entailment.py
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
15 """ParsiNLU Persian reading comprehension task"""

from __future__ import absolute_import, division, print_function

import csv

import datasets


logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@article{khashabi2020parsinlu,
    title = {ParsiNLU: A Suite of Language Understanding Challenges for Persian},
    author = {Khashabi, Daniel and Cohan, Arman and Shakeri, Siamak and Hosseini, Pedram and Pezeshkpour, Pouya and Alikhani, Malihe and Aminnaseri, Moin and Bitaab, Marzieh and Brahman, Faeze and Ghazarian, Sarik and others},
    year = {2020},
    journal = {arXiv e-prints},
    eprint = {2012.06154},
}
"""

_DESCRIPTION = """\
A Persian textual entailment task: deciding whether `sent1` entails `sent2`.
"""

_HOMEPAGE = "https://github.com/persiannlp/parsinlu/"

_LICENSE = "CC BY-NC-SA 4.0"

_URL = "https://raw.githubusercontent.com/persiannlp/parsinlu/master/data/entailment/"
_URLs = {
    "train": _URL + "train.csv",
    "dev": _URL + "dev.csv",
    "test": _URL + "test.csv",
}
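
# Note on the expected file layout (inferred from the indexing in
# _generate_examples below, not an official schema): each split is a CSV file
# with a header row, where column 0 appears to be a row identifier, followed by
# sent1, sent2, label, and category.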


class ParsinluEntailment(datasets.GeneratorBasedBuilder):
    """ParsiNLU Persian textual entailment task."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="parsinlu-repo", version=VERSION, description="ParsiNLU repository: textual entailment"
        ),
    ]

    def _info(self):
        features = datasets.Features(
            {
                "sent1": datasets.Value("string"),
                "sent2": datasets.Value("string"),
                "category": datasets.Value("string"),
                "label": datasets.Value("string"),
            }
        )

        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the columns of the dataset and their types.
            features=features,
            # There is no canonical (input, target) pair to expose via
            # as_supervised=True, so leave supervised_keys unset.
            supervised_keys=None,
            # Homepage of the dataset for documentation.
            homepage=_HOMEPAGE,
            # License for the dataset if available.
            license=_LICENSE,
            # Citation for the dataset.
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        data_dir = dl_manager.download_and_extract(_URLs)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples.
                gen_kwargs={
                    "filepath": data_dir["train"],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": data_dir["test"], "split": "test"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": data_dir["dev"],
                    "split": "dev",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        logger.info("generating examples from = %s", filepath)

        with open(filepath, encoding="utf-8") as f:
            reader = csv.reader(f)

            for id_, row in enumerate(reader):
                # Skip the header row.
                if id_ == 0:
                    continue

                # Strip stray tabs and newlines that would otherwise leak into the fields.
                sent1 = row[1].replace("\t", "").replace("\n", "")
                sent2 = row[2].replace("\t", "").replace("\n", "")
                label = row[3].replace("\t", "").replace("\n", "")
                cat = row[4].replace("\t", "").replace("\n", "")
                yield id_, {
                    "sent1": sent1,
                    "sent2": sent2,
                    "label": label,
                    "category": cat,
                }
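
# Example usage (a sketch, not part of the loading script; the dataset path
# passed to load_dataset is an assumption and may differ on the Hugging Face Hub):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("persiannlp/parsinlu_entailment", split="train")
#     print(ds[0]["sent1"], ds[0]["sent2"], ds[0]["label"])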