Commit 161ece9
Parent(s): cd6b868

Convert dataset to Parquet (#3)

- Convert dataset to Parquet (32e8ab15f0df7139ae5b8a87eded08f8ceedf11b)
- Add labeled_swap data files (6ddde35e8a87929d9536cd1463a579a44503e96a)
- Add unlabeled_final data files (06b62eae81864f3d8d9d9d7cc3a8130e542b5cc8)
- Delete loading script (638290f20b38fac4b9c3ef63d57fd053b3b4868c)
- Delete legacy dataset_infos.json (1e90e82ecadb712857d299abefce8df34387aaee)
- README.md +35 -16
- dataset_infos.json +0 -1
- labeled_final/test-00000-of-00001.parquet +3 -0
- labeled_final/train-00000-of-00001.parquet +3 -0
- labeled_final/validation-00000-of-00001.parquet +3 -0
- labeled_swap/train-00000-of-00001.parquet +3 -0
- paws.py +0 -209
- unlabeled_final/train-00000-of-00001.parquet +3 -0
- unlabeled_final/validation-00000-of-00001.parquet +3 -0
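
A conversion like this can be reproduced with the `datasets` library alone. Below is a minimal sketch, assuming the pre-conversion, script-based `paws` dataset is still loadable; the output paths are chosen to mirror this commit's layout (the exact tooling the maintainers used is not shown in the commit):

```python
# Sketch: materialize each PAWS config as Parquet shards mirroring this commit.
import os

from datasets import load_dataset

for config in ["labeled_final", "labeled_swap", "unlabeled_final"]:
    splits = load_dataset("paws", config)  # DatasetDict of splits
    os.makedirs(config, exist_ok=True)
    for split_name, split in splits.items():
        # One shard per split, e.g. labeled_final/train-00000-of-00001.parquet
        split.to_parquet(f"{config}/{split_name}-00000-of-00001.parquet")
```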
README.md CHANGED

@@ -24,6 +24,10 @@ task_ids:
 - multi-input-text-classification
 paperswithcode_id: paws
 pretty_name: 'PAWS: Paraphrase Adversaries from Word Scrambling'
+config_names:
+- labeled_final
+- labeled_swap
+- unlabeled_final
 tags:
 - paraphrase-identification
 dataset_info:
@@ -43,16 +47,16 @@ dataset_info:
           '1': '1'
   splits:
   - name: train
-    num_bytes: 12239978
+    num_bytes: 12239938
     num_examples: 49401
   - name: test
-    num_bytes: 1987802
+    num_bytes: 1987794
     num_examples: 8000
   - name: validation
-    num_bytes: 1975870
+    num_bytes: 1975862
     num_examples: 8000
-  download_size: 4687157
-  dataset_size: 16203650
+  download_size: 10899391
+  dataset_size: 16203594
 - config_name: labeled_swap
   features:
   - name: id
@@ -69,10 +73,10 @@ dataset_info:
           '1': '1'
   splits:
   - name: train
-    num_bytes: 7963651
+    num_bytes: 7963619
     num_examples: 30397
-  download_size: 2257283
-  dataset_size: 7963651
+  download_size: 5741756
+  dataset_size: 7963619
 - config_name: unlabeled_final
   features:
   - name: id
@@ -89,17 +93,32 @@ dataset_info:
           '1': '1'
   splits:
   - name: train
-    num_bytes: 157806996
+    num_bytes: 157806476
     num_examples: 645652
   - name: validation
-    num_bytes: 2442173
+    num_bytes: 2442165
     num_examples: 10000
-  download_size: 47393331
-  dataset_size: 160249169
-configs:
-- labeled_final
-- labeled_swap
-- unlabeled_final
+  download_size: 112644285
+  dataset_size: 160248641
+configs:
+- config_name: labeled_final
+  data_files:
+  - split: train
+    path: labeled_final/train-*
+  - split: test
+    path: labeled_final/test-*
+  - split: validation
+    path: labeled_final/validation-*
+- config_name: labeled_swap
+  data_files:
+  - split: train
+    path: labeled_swap/train-*
+- config_name: unlabeled_final
+  data_files:
+  - split: train
+    path: unlabeled_final/train-*
+  - split: validation
+    path: unlabeled_final/validation-*
 ---
 
 # Dataset Card for PAWS: Paraphrase Adversaries from Word Scrambling
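
With the `configs:` mapping above, the Hub resolves every split directly from the Parquet shards, so no loading script is required. A minimal usage sketch (assuming the dataset id `paws` on the Hub):

```python
from datasets import load_dataset

# Splits are resolved from the YAML `configs:` mapping, not from paws.py.
paws = load_dataset("paws", "labeled_final")
print(paws["train"][0])  # {'id': ..., 'sentence1': ..., 'sentence2': ..., 'label': ...}
```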
dataset_infos.json DELETED

@@ -1 +0,0 @@
-{"labeled_final": {"description": "PAWS: Paraphrase Adversaries from Word Scrambling\n\nThis dataset contains 108,463 human-labeled and 656k noisily labeled pairs that feature\nthe importance of modeling structure, context, and word order information for the problem\nof paraphrase identification. The dataset has two subsets, one based on Wikipedia and the\nother one based on the Quora Question Pairs (QQP) dataset.\n\nFor further details, see the accompanying paper: PAWS: Paraphrase Adversaries from Word Scrambling\n(https://arxiv.org/abs/1904.01130)\n\nPAWS-QQP is not available due to license of QQP. It must be reconstructed by downloading the original\ndata and then running our scripts to produce the data and attach the labels.\n\nNOTE: There might be some missing or wrong labels in the dataset and we have replaced them with -1.\n", "citation": "@InProceedings{paws2019naacl,\n title = {{PAWS: Paraphrase Adversaries from Word Scrambling}},\n author = {Zhang, Yuan and Baldridge, Jason and He, Luheng},\n booktitle = {Proc. of NAACL},\n year = {2019}\n}\n", "homepage": "https://github.com/google-research-datasets/paws", "license": "The dataset may be freely used for any purpose, although acknowledgement of Google LLC (\"Google\") as the data source would be appreciated. The dataset is provided \"AS IS\" without any warranty, express or implied. Google disclaims all liability for any damages, direct or indirect, resulting from the use of the dataset.", "features": {"id": {"dtype": "int32", "id": null, "_type": "Value"}, "sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["0", "1"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "paws", "config_name": "labeled_final", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 12239978, "num_examples": 49401, "dataset_name": "paws"}, "test": {"name": "test", "num_bytes": 1987802, "num_examples": 8000, "dataset_name": "paws"}, "validation": {"name": "validation", "num_bytes": 1975870, "num_examples": 8000, "dataset_name": "paws"}}, "download_checksums": {"https://storage.googleapis.com/paws/english/paws_wiki_labeled_final.tar.gz": {"num_bytes": 4687157, "checksum": "1aad6cbb8a90b15563a0c154752c2b2c8e3bc5bdaa125172214d598bc76bc9fd"}}, "download_size": 4687157, "post_processing_size": null, "dataset_size": 16203650, "size_in_bytes": 20890807}, "labeled_swap": {"description": "PAWS: Paraphrase Adversaries from Word Scrambling\n\nThis dataset contains 108,463 human-labeled and 656k noisily labeled pairs that feature\nthe importance of modeling structure, context, and word order information for the problem\nof paraphrase identification. The dataset has two subsets, one based on Wikipedia and the\nother one based on the Quora Question Pairs (QQP) dataset.\n\nFor further details, see the accompanying paper: PAWS: Paraphrase Adversaries from Word Scrambling\n(https://arxiv.org/abs/1904.01130)\n\nPAWS-QQP is not available due to license of QQP. It must be reconstructed by downloading the original\ndata and then running our scripts to produce the data and attach the labels.\n\nNOTE: There might be some missing or wrong labels in the dataset and we have replaced them with -1.\n", "citation": "@InProceedings{paws2019naacl,\n title = {{PAWS: Paraphrase Adversaries from Word Scrambling}},\n author = {Zhang, Yuan and Baldridge, Jason and He, Luheng},\n booktitle = {Proc. of NAACL},\n year = {2019}\n}\n", "homepage": "https://github.com/google-research-datasets/paws", "license": "The dataset may be freely used for any purpose, although acknowledgement of Google LLC (\"Google\") as the data source would be appreciated. The dataset is provided \"AS IS\" without any warranty, express or implied. Google disclaims all liability for any damages, direct or indirect, resulting from the use of the dataset.", "features": {"id": {"dtype": "int32", "id": null, "_type": "Value"}, "sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["0", "1"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "paws", "config_name": "labeled_swap", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 7963651, "num_examples": 30397, "dataset_name": "paws"}}, "download_checksums": {"https://storage.googleapis.com/paws/english/paws_wiki_labeled_swap.tar.gz": {"num_bytes": 2257283, "checksum": "886ddb2f7f7499b2f64d260956ebbd6e14fc436eadac56cdbb966831b00d7861"}}, "download_size": 2257283, "post_processing_size": null, "dataset_size": 7963651, "size_in_bytes": 10220934}, "unlabeled_final": {"description": "PAWS: Paraphrase Adversaries from Word Scrambling\n\nThis dataset contains 108,463 human-labeled and 656k noisily labeled pairs that feature\nthe importance of modeling structure, context, and word order information for the problem\nof paraphrase identification. The dataset has two subsets, one based on Wikipedia and the\nother one based on the Quora Question Pairs (QQP) dataset.\n\nFor further details, see the accompanying paper: PAWS: Paraphrase Adversaries from Word Scrambling\n(https://arxiv.org/abs/1904.01130)\n\nPAWS-QQP is not available due to license of QQP. It must be reconstructed by downloading the original\ndata and then running our scripts to produce the data and attach the labels.\n\nNOTE: There might be some missing or wrong labels in the dataset and we have replaced them with -1.\n", "citation": "@InProceedings{paws2019naacl,\n title = {{PAWS: Paraphrase Adversaries from Word Scrambling}},\n author = {Zhang, Yuan and Baldridge, Jason and He, Luheng},\n booktitle = {Proc. of NAACL},\n year = {2019}\n}\n", "homepage": "https://github.com/google-research-datasets/paws", "license": "The dataset may be freely used for any purpose, although acknowledgement of Google LLC (\"Google\") as the data source would be appreciated. The dataset is provided \"AS IS\" without any warranty, express or implied. Google disclaims all liability for any damages, direct or indirect, resulting from the use of the dataset.", "features": {"id": {"dtype": "int32", "id": null, "_type": "Value"}, "sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["0", "1"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "paws", "config_name": "unlabeled_final", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 157806996, "num_examples": 645652, "dataset_name": "paws"}, "validation": {"name": "validation", "num_bytes": 2442173, "num_examples": 10000, "dataset_name": "paws"}}, "download_checksums": {"https://storage.googleapis.com/paws/english/paws_wiki_unlabeled_final.tar.gz": {"num_bytes": 47393331, "checksum": "c70222d390ece5218e397b3ea4b3797212ffe945fe1eae088fa6cb317c2ca3c6"}}, "download_size": 47393331, "post_processing_size": null, "dataset_size": 160249169, "size_in_bytes": 207642500}}
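
Besides split sizes, the deleted file recorded a sha256 checksum for each source archive. For reference, a short sketch of the verification it enabled; the URL and expected digest are copied from the JSON above, while the helper itself is illustrative rather than part of the `datasets` API:

```python
import hashlib
import urllib.request

URL = "https://storage.googleapis.com/paws/english/paws_wiki_labeled_swap.tar.gz"
EXPECTED = "886ddb2f7f7499b2f64d260956ebbd6e14fc436eadac56cdbb966831b00d7861"

def sha256_of(url: str) -> str:
    # Stream the archive and hash it without holding it all in memory.
    digest = hashlib.sha256()
    with urllib.request.urlopen(url) as resp:
        for chunk in iter(lambda: resp.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()

assert sha256_of(URL) == EXPECTED
```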
labeled_final/test-00000-of-00001.parquet ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae342ff12bb84b84b95f468abf5db6cb7c7bd578271299fe9c99be75b8132f4d
+size 1235128

labeled_final/train-00000-of-00001.parquet ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8dc9ad3e5f30ad9a86b290fe236d528ef23a5751fec9a35d99cbacf68ba277cf
+size 8433884

labeled_final/validation-00000-of-00001.parquet ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7760d829453764ba342a6f562809a8ed21c2c3eec3fd9ffa544089f145d42f6d
+size 1230379

labeled_swap/train-00000-of-00001.parquet ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:21b9b68cd9398f01e6c766a9b12bc9a31cee11a6170a5acc5fa87ba853033a9b
+size 5741756
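
Each ADDED Parquet file is committed as a Git LFS pointer: three text lines giving the spec version, the sha256 of the real blob, and its byte size. A small sketch of validating a downloaded blob against its pointer; the parsing is plain text splitting on the format shown above, not an official LFS API:

```python
import hashlib
from pathlib import Path

def parse_lfs_pointer(text: str) -> dict:
    # Lines look like "version <url>", "oid sha256:<hex>", "size <bytes>".
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {"oid": fields["oid"].removeprefix("sha256:"), "size": int(fields["size"])}

def blob_matches_pointer(pointer_path: str, blob_path: str) -> bool:
    pointer = parse_lfs_pointer(Path(pointer_path).read_text())
    blob = Path(blob_path).read_bytes()
    return len(blob) == pointer["size"] and hashlib.sha256(blob).hexdigest() == pointer["oid"]
```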
paws.py DELETED

@@ -1,209 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""PAWS, a dataset for paraphrase identification"""
-
-
-import csv
-
-import datasets
-
-
-_CITATION = """\
-@InProceedings{paws2019naacl,
-  title = {{PAWS: Paraphrase Adversaries from Word Scrambling}},
-  author = {Zhang, Yuan and Baldridge, Jason and He, Luheng},
-  booktitle = {Proc. of NAACL},
-  year = {2019}
-}
-"""
-
-_DESCRIPTION = """\
-PAWS: Paraphrase Adversaries from Word Scrambling
-
-This dataset contains 108,463 human-labeled and 656k noisily labeled pairs that feature
-the importance of modeling structure, context, and word order information for the problem
-of paraphrase identification. The dataset has two subsets, one based on Wikipedia and the
-other one based on the Quora Question Pairs (QQP) dataset.
-
-For further details, see the accompanying paper: PAWS: Paraphrase Adversaries from Word Scrambling
-(https://arxiv.org/abs/1904.01130)
-
-PAWS-QQP is not available due to license of QQP. It must be reconstructed by downloading the original
-data and then running our scripts to produce the data and attach the labels.
-
-NOTE: There might be some missing or wrong labels in the dataset and we have replaced them with -1.
-"""
-
-_HOMEPAGE = "https://github.com/google-research-datasets/paws"
-
-_LICENSE = 'The dataset may be freely used for any purpose, although acknowledgement of Google LLC ("Google") as the data source would be appreciated. The dataset is provided "AS IS" without any warranty, express or implied. Google disclaims all liability for any damages, direct or indirect, resulting from the use of the dataset.'
-
-_DATA_OPTIONS = [
-    "labeled_final",
-    "labeled_swap",
-    "unlabeled_final",
-]
-
-
-class PAWSConfig(datasets.BuilderConfig):
-    """BuilderConfig for PAWS."""
-
-    def __init__(self, **kwargs):
-        """Constructs a PAWSConfig.
-        Args:
-            **kwargs: keyword arguments forwarded to super.
-        """
-        super(PAWSConfig, self).__init__(version=datasets.Version("1.1.0", ""), **kwargs),
-
-
-class PAWS(datasets.GeneratorBasedBuilder):
-    """PAWS, a dataset for paraphrase identification"""
-
-    VERSION = datasets.Version("1.1.0")
-
-    BUILDER_CONFIGS = [
-        PAWSConfig(
-            name=config_name,
-            description=(f"This config contains samples of {config_name}."),
-        )
-        for config_name in _DATA_OPTIONS
-    ]
-
-    def _info(self):
-        features = datasets.Features(
-            {
-                "id": datasets.Value("int32"),
-                "sentence1": datasets.Value("string"),
-                "sentence2": datasets.Value("string"),
-                "label": datasets.features.ClassLabel(names=["0", "1"]),
-            }
-        )
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # This defines the different columns of the dataset and their types
-            features=features,  # Here we define them above because they are different between the two configurations
-            # If there's a common (input, target) tuple from the features,
-            # specify them here. They'll be used if as_supervised=True in
-            # builder.as_dataset.
-            supervised_keys=None,
-            # Homepage of the dataset for documentation
-            homepage=_HOMEPAGE,
-            # License for the dataset if available
-            license=_LICENSE,
-            # Citation for the dataset
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-
-        _DATA_URL = f"https://storage.googleapis.com/paws/english/paws_wiki_{self.config.name}.tar.gz"
-        archive = dl_manager.download(_DATA_URL)
-
-        if self.config.name == "labeled_final":
-            _TRAIN_FILE_NAME = "/".join(["final", "train.tsv"])
-            _VAL_FILE_NAME = "/".join(["final", "dev.tsv"])
-            _TEST_FILE_NAME = "/".join(["final", "test.tsv"])
-            return [
-                datasets.SplitGenerator(
-                    name=datasets.Split.TRAIN,
-                    # These kwargs will be passed to _generate_examples
-                    gen_kwargs={
-                        "filepath": _TRAIN_FILE_NAME,
-                        "files": dl_manager.iter_archive(archive),
-                    },
-                ),
-                datasets.SplitGenerator(
-                    name=datasets.Split.TEST,
-                    # These kwargs will be passed to _generate_examples
-                    gen_kwargs={
-                        "filepath": _TEST_FILE_NAME,
-                        "files": dl_manager.iter_archive(archive),
-                    },
-                ),
-                datasets.SplitGenerator(
-                    name=datasets.Split.VALIDATION,
-                    # These kwargs will be passed to _generate_examples
-                    gen_kwargs={
-                        "filepath": _VAL_FILE_NAME,
-                        "files": dl_manager.iter_archive(archive),
-                    },
-                ),
-            ]
-
-        elif self.config.name == "labeled_swap":
-            _TRAIN_FILE_NAME = "/".join(["swap", "train.tsv"])
-            return [
-                datasets.SplitGenerator(
-                    name=datasets.Split.TRAIN,
-                    # These kwargs will be passed to _generate_examples
-                    gen_kwargs={
-                        "filepath": _TRAIN_FILE_NAME,
-                        "files": dl_manager.iter_archive(archive),
-                    },
-                ),
-            ]
-
-        elif self.config.name == "unlabeled_final":
-            _TRAIN_FILE_NAME = "/".join(["unlabeled", "final", "train.tsv"])
-            _VAL_FILE_NAME = "/".join(["unlabeled", "final", "dev.tsv"])
-            return [
-                datasets.SplitGenerator(
-                    name=datasets.Split.TRAIN,
-                    # These kwargs will be passed to _generate_examples
-                    gen_kwargs={
-                        "filepath": _TRAIN_FILE_NAME,
-                        "files": dl_manager.iter_archive(archive),
-                    },
-                ),
-                datasets.SplitGenerator(
-                    name=datasets.Split.VALIDATION,
-                    # These kwargs will be passed to _generate_examples
-                    gen_kwargs={
-                        "filepath": _VAL_FILE_NAME,
-                        "files": dl_manager.iter_archive(archive),
-                    },
-                ),
-            ]
-        else:
-            raise NotImplementedError(f"{self.config.name} does not exist")
-
-    def _generate_examples(self, filepath, files):
-        """Yields examples."""
-        for path, f in files:
-            if path == filepath:
-                lines = (line.decode("utf-8") for line in f)
-                data = csv.DictReader(lines, delimiter="\t")
-                for id_, row in enumerate(data):
-                    if self.config.name != "unlabeled_final":
-                        if row["label"] not in ["0", "1"]:
-                            row["label"] = -1
-                        yield id_, {
-                            "id": row["id"],
-                            "sentence1": row["sentence1"],
-                            "sentence2": row["sentence2"],
-                            "label": row["label"],
-                        }
-                    else:
-                        if row["noisy_label"] not in ["0", "1"]:
-                            row["noisy_label"] = -1
-                        yield id_, {
-                            "id": row["id"],
-                            "sentence1": row["sentence1"],
-                            "sentence2": row["sentence2"],
-                            "label": row["noisy_label"],
-                        }
-                break
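
With the script deleted, its download-and-parse pipeline (TSV streaming, plus the coercion of malformed labels to -1 described in `_DESCRIPTION`) is superseded by plain Parquet reads. A minimal sketch reading one of this commit's shards directly, assuming pandas with a Parquet engine such as pyarrow installed:

```python
import pandas as pd

# Columns match the deleted script's features: id, sentence1, sentence2, label.
df = pd.read_parquet("labeled_final/train-00000-of-00001.parquet")
print(len(df))              # 49401, per the README split metadata
print(df.columns.tolist())  # ['id', 'sentence1', 'sentence2', 'label']
```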
unlabeled_final/train-00000-of-00001.parquet ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c9724b30b187e8e2a6a4372cdefbeead1504d8645cdf0e1886c36b5ba3a68918
+size 110988709

unlabeled_final/validation-00000-of-00001.parquet ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:438720a7c059d2be528953c6a32f8542098aea02c6697460fc7b2d52c124a798
+size 1655576