EricR401S committed on
Commit de05476
1 Parent(s): 450c828
boiler_plate_check_functions.ipynb ADDED
@@ -0,0 +1,330 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "source": [
+ "\"\"\"This script's purpose is to re-define the datset loading functions to better suit this specific\n",
+ "reddit posts dataset.\"\"\"\n",
+ "\n",
+ "import csv\n",
+ "import json\n",
+ "import os"
+ ],
+ "outputs": [],
+ "metadata": {}
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "source": [
+ "import csv\n",
+ "import json\n",
+ "import os"
+ ],
+ "outputs": [],
+ "metadata": {}
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "source": [
+ "from datasets import load_dataset\n",
+ "\n",
+ ""
+ ],
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stderr",
+ "text": [
+ "c:\\Users\\ericr\\miniconda3\\envs\\sta663C\\Lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
+ " from .autonotebook import tqdm as notebook_tqdm\n"
+ ]
+ }
+ ],
+ "metadata": {}
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "source": [
+ "ds = load_dataset('rotten_tomatoes')"
+ ],
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stderr",
+ "text": [
+ "Downloading data: 100%|██████████| 699k/699k [00:00<00:00, 2.88MB/s]\n",
+ "Downloading data: 100%|██████████| 90.0k/90.0k [00:00<00:00, 90.7kB/s]\n",
+ "Downloading data: 100%|██████████| 92.2k/92.2k [00:00<00:00, 1.56MB/s]\n",
+ "Generating train split: 100%|██████████| 8530/8530 [00:00<00:00, 448827.83 examples/s]\n",
+ "Generating validation split: 100%|██████████| 1066/1066 [00:00<00:00, 94627.05 examples/s]\n",
+ "Generating test split: 100%|██████████| 1066/1066 [00:00<00:00, 127328.15 examples/s]\n"
+ ]
+ }
+ ],
+ "metadata": {}
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "source": [
+ "ds"
+ ],
+ "outputs": [
+ {
+ "output_type": "execute_result",
+ "data": {
+ "text/plain": [
+ "DatasetDict({\n",
+ " train: Dataset({\n",
+ " features: ['text', 'label'],\n",
+ " num_rows: 8530\n",
+ " })\n",
+ " validation: Dataset({\n",
+ " features: ['text', 'label'],\n",
+ " num_rows: 1066\n",
+ " })\n",
+ " test: Dataset({\n",
+ " features: ['text', 'label'],\n",
+ " num_rows: 1066\n",
+ " })\n",
+ "})"
+ ]
+ },
+ "metadata": {},
+ "execution_count": 4
+ }
+ ],
+ "metadata": {}
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "source": [
+ "ds.citation"
+ ],
+ "outputs": [
+ {
+ "output_type": "error",
+ "ename": "AttributeError",
+ "evalue": "'DatasetDict' object has no attribute 'citation'",
+ "traceback": [
+ "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
+ "\u001b[1;31mAttributeError\u001b[0m Traceback (most recent call last)",
+ "Cell \u001b[1;32mIn[5], line 1\u001b[0m\n\u001b[1;32m----> 1\u001b[0m ds\u001b[39m.\u001b[39;49mcitation\n",
+ "\u001b[1;31mAttributeError\u001b[0m: 'DatasetDict' object has no attribute 'citation'"
+ ]
+ }
+ ],
+ "metadata": {}
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "source": [
+ "ds.citation()"
+ ],
+ "outputs": [
+ {
+ "output_type": "error",
+ "ename": "AttributeError",
+ "evalue": "'DatasetDict' object has no attribute 'citation'",
+ "traceback": [
+ "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
+ "\u001b[1;31mAttributeError\u001b[0m Traceback (most recent call last)",
+ "Cell \u001b[1;32mIn[6], line 1\u001b[0m\n\u001b[1;32m----> 1\u001b[0m ds\u001b[39m.\u001b[39;49mcitation()\n",
+ "\u001b[1;31mAttributeError\u001b[0m: 'DatasetDict' object has no attribute 'citation'"
+ ]
+ }
+ ],
+ "metadata": {}
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "source": [
+ "ds.column_names"
+ ],
+ "outputs": [
+ {
+ "output_type": "execute_result",
+ "data": {
+ "text/plain": [
+ "{'train': ['text', 'label'],\n",
+ " 'validation': ['text', 'label'],\n",
+ " 'test': ['text', 'label']}"
+ ]
+ },
+ "metadata": {},
+ "execution_count": 7
+ }
+ ],
+ "metadata": {}
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "source": [
+ "ds['train']"
+ ],
+ "outputs": [
+ {
+ "output_type": "execute_result",
+ "data": {
+ "text/plain": [
+ "Dataset({\n",
+ " features: ['text', 'label'],\n",
+ " num_rows: 8530\n",
+ "})"
+ ]
+ },
+ "metadata": {},
+ "execution_count": 8
+ }
+ ],
+ "metadata": {}
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "source": [
+ "ds.license"
+ ],
+ "outputs": [
+ {
+ "output_type": "error",
+ "ename": "AttributeError",
+ "evalue": "'DatasetDict' object has no attribute 'license'",
+ "traceback": [
+ "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
+ "\u001b[1;31mAttributeError\u001b[0m Traceback (most recent call last)",
+ "Cell \u001b[1;32mIn[9], line 1\u001b[0m\n\u001b[1;32m----> 1\u001b[0m ds\u001b[39m.\u001b[39;49mlicense\n",
+ "\u001b[1;31mAttributeError\u001b[0m: 'DatasetDict' object has no attribute 'license'"
+ ]
+ }
+ ],
+ "metadata": {}
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "source": [
+ "ds.dataset_size"
+ ],
+ "outputs": [
+ {
+ "output_type": "error",
+ "ename": "AttributeError",
+ "evalue": "'DatasetDict' object has no attribute 'dataset_size'",
+ "traceback": [
+ "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
+ "\u001b[1;31mAttributeError\u001b[0m Traceback (most recent call last)",
+ "Cell \u001b[1;32mIn[10], line 1\u001b[0m\n\u001b[1;32m----> 1\u001b[0m ds\u001b[39m.\u001b[39;49mdataset_size\n",
+ "\u001b[1;31mAttributeError\u001b[0m: 'DatasetDict' object has no attribute 'dataset_size'"
+ ]
+ }
+ ],
+ "metadata": {}
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "source": [
+ "ds._info"
+ ],
+ "outputs": [
+ {
+ "output_type": "error",
+ "ename": "AttributeError",
+ "evalue": "'DatasetDict' object has no attribute '_info'",
+ "traceback": [
+ "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
+ "\u001b[1;31mAttributeError\u001b[0m Traceback (most recent call last)",
+ "Cell \u001b[1;32mIn[11], line 1\u001b[0m\n\u001b[1;32m----> 1\u001b[0m ds\u001b[39m.\u001b[39;49m_info\n",
+ "\u001b[1;31mAttributeError\u001b[0m: 'DatasetDict' object has no attribute '_info'"
+ ]
+ }
+ ],
+ "metadata": {}
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "source": [
+ "ds['train']._info"
+ ],
+ "outputs": [
+ {
+ "output_type": "execute_result",
+ "data": {
+ "text/plain": [
+ "DatasetInfo(description=\"Movie Review Dataset.\\nThis is a dataset of containing 5,331 positive and 5,331 negative processed\\nsentences from Rotten Tomatoes movie reviews. This data was first used in Bo\\nPang and Lillian Lee, ``Seeing stars: Exploiting class relationships for\\nsentiment categorization with respect to rating scales.'', Proceedings of the\\nACL, 2005.\\n\", citation='@InProceedings{Pang+Lee:05a,\\n author = {Bo Pang and Lillian Lee},\\n title = {Seeing stars: Exploiting class relationships for sentiment\\n categorization with respect to rating scales},\\n booktitle = {Proceedings of the ACL},\\n year = 2005\\n}\\n', homepage='http://www.cs.cornell.edu/people/pabo/movie-review-data/', license='', features={'text': Value(dtype='string', id=None), 'label': ClassLabel(names=['neg', 'pos'], id=None)}, post_processed=None, supervised_keys=SupervisedKeysData(input='', output=''), task_templates=[TextClassification(task='text-classification', text_column='text', label_column='label')], builder_name='parquet', dataset_name='rotten_tomatoes', config_name='default', version=1.0.0, splits={'train': SplitInfo(name='train', num_bytes=1075873, num_examples=8530, shard_lengths=None, dataset_name='rotten_tomatoes'), 'validation': SplitInfo(name='validation', num_bytes=134809, num_examples=1066, shard_lengths=None, dataset_name='rotten_tomatoes'), 'test': SplitInfo(name='test', num_bytes=136102, num_examples=1066, shard_lengths=None, dataset_name='rotten_tomatoes')}, download_checksums={'hf://datasets/rotten_tomatoes@cab0f883b39cfb510c34e41db874679b3e2bafa3/default/train/0000.parquet': {'num_bytes': 698845, 'checksum': None}, 'hf://datasets/rotten_tomatoes@cab0f883b39cfb510c34e41db874679b3e2bafa3/default/validation/0000.parquet': {'num_bytes': 90001, 'checksum': None}, 'hf://datasets/rotten_tomatoes@cab0f883b39cfb510c34e41db874679b3e2bafa3/default/test/0000.parquet': {'num_bytes': 92206, 'checksum': None}}, download_size=881052, post_processing_size=None, dataset_size=1346784, size_in_bytes=2227836)"
+ ]
+ },
+ "metadata": {},
+ "execution_count": 12
+ }
+ ],
+ "metadata": {}
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "source": [
+ "print(ds['train']._info)"
+ ],
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "DatasetInfo(description=\"Movie Review Dataset.\\nThis is a dataset of containing 5,331 positive and 5,331 negative processed\\nsentences from Rotten Tomatoes movie reviews. This data was first used in Bo\\nPang and Lillian Lee, ``Seeing stars: Exploiting class relationships for\\nsentiment categorization with respect to rating scales.'', Proceedings of the\\nACL, 2005.\\n\", citation='@InProceedings{Pang+Lee:05a,\\n author = {Bo Pang and Lillian Lee},\\n title = {Seeing stars: Exploiting class relationships for sentiment\\n categorization with respect to rating scales},\\n booktitle = {Proceedings of the ACL},\\n year = 2005\\n}\\n', homepage='http://www.cs.cornell.edu/people/pabo/movie-review-data/', license='', features={'text': Value(dtype='string', id=None), 'label': ClassLabel(names=['neg', 'pos'], id=None)}, post_processed=None, supervised_keys=SupervisedKeysData(input='', output=''), task_templates=[TextClassification(task='text-classification', text_column='text', label_column='label')], builder_name='parquet', dataset_name='rotten_tomatoes', config_name='default', version=1.0.0, splits={'train': SplitInfo(name='train', num_bytes=1075873, num_examples=8530, shard_lengths=None, dataset_name='rotten_tomatoes'), 'validation': SplitInfo(name='validation', num_bytes=134809, num_examples=1066, shard_lengths=None, dataset_name='rotten_tomatoes'), 'test': SplitInfo(name='test', num_bytes=136102, num_examples=1066, shard_lengths=None, dataset_name='rotten_tomatoes')}, download_checksums={'hf://datasets/rotten_tomatoes@cab0f883b39cfb510c34e41db874679b3e2bafa3/default/train/0000.parquet': {'num_bytes': 698845, 'checksum': None}, 'hf://datasets/rotten_tomatoes@cab0f883b39cfb510c34e41db874679b3e2bafa3/default/validation/0000.parquet': {'num_bytes': 90001, 'checksum': None}, 'hf://datasets/rotten_tomatoes@cab0f883b39cfb510c34e41db874679b3e2bafa3/default/test/0000.parquet': {'num_bytes': 92206, 'checksum': None}}, download_size=881052, post_processing_size=None, dataset_size=1346784, size_in_bytes=2227836)\n"
+ ]
+ }
+ ],
+ "metadata": {}
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "source": [
+ "print(ds['train']._info.description)"
+ ],
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "Movie Review Dataset.\n",
+ "This is a dataset of containing 5,331 positive and 5,331 negative processed\n",
+ "sentences from Rotten Tomatoes movie reviews. This data was first used in Bo\n",
+ "Pang and Lillian Lee, ``Seeing stars: Exploiting class relationships for\n",
+ "sentiment categorization with respect to rating scales.'', Proceedings of the\n",
+ "ACL, 2005.\n",
+ "\n"
+ ]
+ }
+ ],
+ "metadata": {}
+ }
+ ],
+ "nbformat": 4,
+ "nbformat_minor": 2,
+ "metadata": {
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": 3
+ }
+ }
+ }
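
Note on the checks above: citation, license, and dataset_size are fields of DatasetInfo, which is attached to each split (or to the builder), not to the DatasetDict returned by load_dataset, which is why the AttributeError cells fail while ds['train']._info succeeds. Below is a minimal sketch of reaching the same metadata through the public info attribute instead of the private _info one; this is just the standard datasets API, nothing specific to this repo.

from datasets import load_dataset, load_dataset_builder

ds = load_dataset("rotten_tomatoes")
info = ds["train"].info        # public counterpart of the _info attribute probed in the notebook
print(info.description)        # "Movie Review Dataset. ..."
print(info.citation)           # BibTeX entry for Pang & Lee (2005)
print(info.license)            # empty string for this dataset
print(info.dataset_size)       # 1346784 bytes across the three splits

# The same DatasetInfo can be inspected without downloading any data:
builder = load_dataset_builder("rotten_tomatoes")
print(builder.info.splits)     # train / validation / test row counts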
reddit_dataset_loader.py CHANGED
@@ -47,7 +47,7 @@ which have risen in response to the clashes between traditional gender roles and
 _HOMEPAGE = "https://huggingface.co/datasets/steamcyclone/Pill_Ideologies-Post_Titles"
 
 # TODO: Add the licence for the dataset here if you can find it
-_LICENSE = "Creative Commons"
+_LICENSE = "Creative Commons" # cc
 
 # TODO: Add link to the official dataset URLs here
 # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
@@ -94,99 +94,99 @@ class SubRedditPosts(datasets.GeneratorBasedBuilder):
 
     DEFAULT_CONFIG_NAME = "first_domain" # It's not mandatory to have a default configuration. Just use one if it make sense.
 
-    def _info(self):
-        # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
-        if (
-            self.config.name == "first_domain"
-        ): # This is the name of the configuration selected in BUILDER_CONFIGS above
-            features = datasets.Features(
-                {
-                    "sentence": datasets.Value("string"),
-                    "option1": datasets.Value("string"),
-                    "answer": datasets.Value("string"),
-                    # These are the features of your dataset like images, labels ...
-                }
-            )
-        else: # This is an example to show how to have different features for "first_domain" and "second_domain"
-            features = datasets.Features(
-                {
-                    "sentence": datasets.Value("string"),
-                    "option2": datasets.Value("string"),
-                    "second_domain_answer": datasets.Value("string"),
-                    # These are the features of your dataset like images, labels ...
-                }
-            )
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # This defines the different columns of the dataset and their types
-            features=features, # Here we define them above because they are different between the two configurations
-            # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
-            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
-            # supervised_keys=("sentence", "label"),
-            # Homepage of the dataset for documentation
-            homepage=_HOMEPAGE,
-            # License for the dataset if available
-            license=_LICENSE,
-            # Citation for the dataset
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
-        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
-
-        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
-        # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
-        # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-        urls = _URLS[self.config.name]
-        data_dir = dl_manager.download_and_extract(urls)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": os.path.join(data_dir, "train.jsonl"),
-                    "split": "train",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": os.path.join(data_dir, "dev.jsonl"),
-                    "split": "dev",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": os.path.join(data_dir, "test.jsonl"),
-                    "split": "test",
-                },
-            ),
-        ]
-
-    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-    def _generate_examples(self, filepath, split):
-        # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
-        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-        with open(filepath, encoding="utf-8") as f:
-            for key, row in enumerate(f):
-                data = json.loads(row)
-                if self.config.name == "first_domain":
-                    # Yields examples as (key, example) tuples
-                    yield key, {
-                        "sentence": data["sentence"],
-                        "option1": data["option1"],
-                        "answer": "" if split == "test" else data["answer"],
-                    }
-                else:
-                    yield key, {
-                        "sentence": data["sentence"],
-                        "option2": data["option2"],
-                        "second_domain_answer": (
-                            "" if split == "test" else data["second_domain_answer"]
-                        ),
-                    }
+    # def _info(self):
+    # # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
+    # if (
+    # self.config.name == "first_domain"
+    # ): # This is the name of the configuration selected in BUILDER_CONFIGS above
+    # features = datasets.Features(
+    # {
+    # "sentence": datasets.Value("string"),
+    # "option1": datasets.Value("string"),
+    # "answer": datasets.Value("string"),
+    # # These are the features of your dataset like images, labels ...
+    # }
+    # )
+    # else: # This is an example to show how to have different features for "first_domain" and "second_domain"
+    # features = datasets.Features(
+    # {
+    # "sentence": datasets.Value("string"),
+    # "option2": datasets.Value("string"),
+    # "second_domain_answer": datasets.Value("string"),
+    # # These are the features of your dataset like images, labels ...
+    # }
+    # )
+    # return datasets.DatasetInfo(
+    # # This is the description that will appear on the datasets page.
+    # description=_DESCRIPTION,
+    # # This defines the different columns of the dataset and their types
+    # features=features, # Here we define them above because they are different between the two configurations
+    # # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
+    # # specify them. They'll be used if as_supervised=True in builder.as_dataset.
+    # # supervised_keys=("sentence", "label"),
+    # # Homepage of the dataset for documentation
+    # homepage=_HOMEPAGE,
+    # # License for the dataset if available
+    # license=_LICENSE,
+    # # Citation for the dataset
+    # citation=_CITATION,
+    # )
+
+    # def _split_generators(self, dl_manager):
+    # # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
+    # # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
+
+    # # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
+    # # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
+    # # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
+    # urls = _URLS[self.config.name]
+    # data_dir = dl_manager.download_and_extract(urls)
+    # return [
+    # datasets.SplitGenerator(
+    # name=datasets.Split.TRAIN,
+    # # These kwargs will be passed to _generate_examples
+    # gen_kwargs={
+    # "filepath": os.path.join(data_dir, "train.jsonl"),
+    # "split": "train",
+    # },
+    # ),
+    # datasets.SplitGenerator(
+    # name=datasets.Split.VALIDATION,
+    # # These kwargs will be passed to _generate_examples
+    # gen_kwargs={
+    # "filepath": os.path.join(data_dir, "dev.jsonl"),
+    # "split": "dev",
+    # },
+    # ),
+    # datasets.SplitGenerator(
+    # name=datasets.Split.TEST,
+    # # These kwargs will be passed to _generate_examples
+    # gen_kwargs={
+    # "filepath": os.path.join(data_dir, "test.jsonl"),
+    # "split": "test",
+    # },
+    # ),
+    # ]
+
+    # # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+    # def _generate_examples(self, filepath, split):
+    # # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+    # # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
+    # with open(filepath, encoding="utf-8") as f:
+    # for key, row in enumerate(f):
+    # data = json.loads(row)
+    # if self.config.name == "first_domain":
+    # # Yields examples as (key, example) tuples
+    # yield key, {
+    # "sentence": data["sentence"],
+    # "option1": data["option1"],
+    # "answer": "" if split == "test" else data["answer"],
+    # }
+    # else:
+    # yield key, {
+    # "sentence": data["sentence"],
+    # "option2": data["option2"],
+    # "second_domain_answer": (
+    # "" if split == "test" else data["second_domain_answer"]
+    # ),
+    # }
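
The three methods commented out above (_info, _split_generators, _generate_examples) are the hooks the loading script will eventually have to re-implement for the reddit posts dataset. As a purely hypothetical illustration of the _generate_examples contract with a CSV source (the notebook imports csv), here is a sketch; the column names "title" and "subreddit" are placeholders for illustration, not the actual schema of this dataset.

import csv

# Hypothetical sketch only: column names are assumed, not taken from this repo.
def generate_examples_from_csv(filepath):
    """Yield (key, example) pairs the way a _generate_examples hook does."""
    with open(filepath, encoding="utf-8") as f:
        reader = csv.DictReader(f)
        for key, row in enumerate(reader):
            yield key, {
                "title": row["title"],          # assumed column
                "subreddit": row["subreddit"],  # assumed column
            }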