Datasets:
Tasks:
Text Classification
Modalities:
Text
Sub-tasks:
semantic-similarity-classification
Languages:
English
Size:
10K - 100K
License:
Update phrase_similarity.py
Browse files
- phrase_similarity.py (+9 additions, -5 deletions)
phrase_similarity.py
CHANGED
@@ -48,10 +48,10 @@ _SPLITS = {
|
|
48 |
"train": "train-v1.0.json",
|
49 |
"dev": "dev-v1.0.json",
|
50 |
"test": "test-v1.0.json",
|
51 |
-
"test_hard": "test-hard-v1.0.json",
|
52 |
}
|
53 |
|
54 |
_PS = "PS"
|
|
|
55 |
|
56 |
|
57 |
class PSConfig(datasets.BuilderConfig):
|
@@ -71,8 +71,13 @@ class PhraseSimilarity(datasets.GeneratorBasedBuilder):
|
|
71 |
BUILDER_CONFIGS = [
|
72 |
PSConfig(
|
73 |
name=_PS,
|
74 |
-
version=datasets.Version("1.0.1"),
|
75 |
description="The PiC Dataset for Phrase Similarity"
|
|
|
|
|
|
|
|
|
|
|
76 |
)
|
77 |
]
|
78 |
|
@@ -101,15 +106,13 @@ class PhraseSimilarity(datasets.GeneratorBasedBuilder):
|
|
101 |
"train": os.path.join(_URL, self.config.name, _SPLITS["train"]),
|
102 |
"dev": os.path.join(_URL, self.config.name, _SPLITS["dev"]),
|
103 |
"test": os.path.join(_URL, self.config.name, _SPLITS["test"]),
|
104 |
-
"test_hard": os.path.join(_URL, self.config.name, _SPLITS["test_hard"])
|
105 |
}
|
106 |
downloaded_files = dl_manager.download_and_extract(urls_to_download)
|
107 |
|
108 |
return [
|
109 |
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
|
110 |
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
|
111 |
-
|
112 |
-
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test_hard"]}),
|
113 |
]
|
114 |
|
115 |
def _generate_examples(self, filepath):
|
@@ -128,3 +131,4 @@ class PhraseSimilarity(datasets.GeneratorBasedBuilder):
|
|
128 |
"idx": example["idx"]
|
129 |
}
|
130 |
key += 1
|
|
|
|
48 |
"train": "train-v1.0.json",
|
49 |
"dev": "dev-v1.0.json",
|
50 |
"test": "test-v1.0.json",
|
|
|
51 |
}
|
52 |
|
53 |
_PS = "PS"
|
54 |
+
_PS_HARD = "PS-hard"
|
55 |
|
56 |
|
57 |
class PSConfig(datasets.BuilderConfig):
|
|
|
71 |
BUILDER_CONFIGS = [
|
72 |
PSConfig(
|
73 |
name=_PS,
|
74 |
+
version=datasets.Version("1.0.2"),
|
75 |
description="The PiC Dataset for Phrase Similarity"
|
76 |
+
),
|
77 |
+
PSConfig(
|
78 |
+
name=_PS_HARD,
|
79 |
+
version=datasets.Version("1.0.2"),
|
80 |
+
description="The PiC Dataset for Phrase Similarity (test set only)"
|
81 |
)
|
82 |
]
|
83 |
|
|
|
106 |
"train": os.path.join(_URL, self.config.name, _SPLITS["train"]),
|
107 |
"dev": os.path.join(_URL, self.config.name, _SPLITS["dev"]),
|
108 |
"test": os.path.join(_URL, self.config.name, _SPLITS["test"]),
|
|
|
109 |
}
|
110 |
downloaded_files = dl_manager.download_and_extract(urls_to_download)
|
111 |
|
112 |
return [
|
113 |
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
|
114 |
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
|
115 |
+
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
|
|
|
116 |
]
|
117 |
|
118 |
def _generate_examples(self, filepath):
|
|
|
131 |
"idx": example["idx"]
|
132 |
}
|
133 |
key += 1
|
134 |
+
|