Languages: Vietnamese
holylovenia committed
Commit 162f337
1 Parent(s): 5debd4d

Upload vicon.py with huggingface_hub

Files changed (1)
  1. vicon.py +184 -0
vicon.py ADDED
@@ -0,0 +1,184 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ ViCon comprises pairs of synonyms and antonyms across \
+ noun, verb, and adjective classes, offering data to \
+ distinguish between similarity and dissimilarity.
+ """
+
+ import os
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+ import pandas as pd
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import Licenses, Tasks
+
+ _CITATION = """\
+ @inproceedings{nguyen-etal-2018-introducing,
+     title = "Introducing Two {V}ietnamese Datasets for Evaluating Semantic Models of (Dis-)Similarity and Relatedness",
+     author = "Nguyen, Kim Anh and
+       Schulte im Walde, Sabine and
+       Vu, Ngoc Thang",
+     editor = "Walker, Marilyn and
+       Ji, Heng and
+       Stent, Amanda",
+     booktitle = "Proceedings of the 2018 Conference of the North {A}merican Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers)",
+     month = jun,
+     year = "2018",
+     address = "New Orleans, Louisiana",
+     publisher = "Association for Computational Linguistics",
+     url = "https://aclanthology.org/N18-2032",
+     doi = "10.18653/v1/N18-2032",
+     pages = "199--205",
+ }
+ """
+
+ _DATASETNAME = "vicon"
+
+ _DESCRIPTION = """\
+ ViCon comprises pairs of synonyms and antonyms across \
+ noun, verb, and adjective classes, offering data to \
+ distinguish between similarity and dissimilarity.
+ """
+
+ _HOMEPAGE = "https://www.ims.uni-stuttgart.de/forschung/ressourcen/experiment-daten/vnese-sem-datasets/"
+
+ _LANGUAGES = ["vie"]  # We follow the ISO 639-3 language code (https://iso639-3.sil.org/code_tables/639/data)
+
+ _LICENSE = Licenses.CC_BY_NC_SA_2_0.value
+
+ _LOCAL = False
+
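+ # All three POS subsets point to the same ViData.zip archive, so a single download serves every config.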
+ _URLS = {
+     "noun": "https://www.ims.uni-stuttgart.de/documents/ressourcen/experiment-daten/ViData.zip",
+     "adj": "https://www.ims.uni-stuttgart.de/documents/ressourcen/experiment-daten/ViData.zip",
+     "verb": "https://www.ims.uni-stuttgart.de/documents/ressourcen/experiment-daten/ViData.zip",
+ }
+
+ # This dataset is best mapped to TEXTUAL_ENTAILMENT,
+ # because its labels (antonym, synonym) roughly correspond to (contradiction, entailment).
+ _SUPPORTED_TASKS = [Tasks.TEXTUAL_ENTAILMENT]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _SEACROWD_VERSION = "2024.06.20"
+
+
+ class ViConDataset(datasets.GeneratorBasedBuilder):
+     """
+     ViCon comprises pairs of synonyms and antonyms across \
+     noun, verb, and adjective classes, offering data to \
+     distinguish between similarity and dissimilarity.
+     """
+
+     POS_TAGS = ["noun", "adj", "verb"]
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+
+     BUILDER_CONFIGS = [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_{POS_TAG}_source",
+             version=_SOURCE_VERSION,
+             description=f"{_DATASETNAME}_{POS_TAG} source schema",
+             schema="source",
+             subset_id=f"{_DATASETNAME}_{POS_TAG}",
+         )
+         for POS_TAG in POS_TAGS
+     ] + [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_{POS_TAG}_seacrowd_pairs",
+             version=_SEACROWD_VERSION,
+             description=f"{_DATASETNAME}_{POS_TAG} SEACrowd schema",
+             schema="seacrowd_pairs",
+             subset_id=f"{_DATASETNAME}_{POS_TAG}",
+         )
+         for POS_TAG in POS_TAGS
+     ]
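+     # This yields six configs in total: vicon_{noun,adj,verb}_source and vicon_{noun,adj,verb}_seacrowd_pairs.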
+
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_noun_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "Word1": datasets.Value("string"),
+                     "Word2": datasets.Value("string"),
+                     "Relation": datasets.Value("string"),
+                 }
+             )
+
+         elif self.config.schema == "seacrowd_pairs":
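+             # "ANT" and "SYN" are the relation labels that appear in the ViCon data (antonym / synonym).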
+             features = schemas.pairs_features(["ANT", "SYN"])
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+
+         POS_TAG = self.config.name.split("_")[1]
+         if POS_TAG not in self.POS_TAGS:
+             raise ValueError(f"Unknown POS tag: {POS_TAG}")
+
+         # The data files are named by their pair counts: 400_noun_pairs.txt,
+         # 600_adj_pairs.txt, and 400_verb_pairs.txt.
+         if POS_TAG in ("noun", "verb"):
+             number = 400
+         else:  # adj
+             number = 600
+
+         data_dir = dl_manager.download_and_extract(_URLS[POS_TAG])
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, f"ViData/ViCon/{number}_{POS_TAG}_pairs.txt"),
+                     "split": "train",
+                 },
+             )
+         ]
+
+     def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+
+         with open(filepath, "r", encoding="utf-8") as file:
+             lines = file.readlines()
+
+         data = []
+         for line in lines:
+             columns = line.strip().split("\t")
+             data.append(columns)
+
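+         # The first line of each file is a header row (Word1, Word2, Relation); the remaining lines are word pairs.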
+         df = pd.DataFrame(data[1:], columns=data[0])
+
+         for index, row in df.iterrows():
+             if self.config.schema == "source":
+                 example = row.to_dict()
+
+             elif self.config.schema == "seacrowd_pairs":
+                 example = {
+                     "id": str(index),
+                     "text_1": str(row["Word1"]),
+                     "text_2": str(row["Word2"]),
+                     "label": str(row["Relation"]),
+                 }
+
+             yield index, example
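
For reference, a minimal sketch of loading this script with the datasets library — assuming vicon.py is saved locally, the seacrowd package it imports is installed, and your datasets version accepts trust_remote_code for loading scripts; the config names come from BUILDER_CONFIGS above:

    import datasets

    # Source schema: rows keep the original Word1 / Word2 / Relation columns.
    noun_pairs = datasets.load_dataset("vicon.py", name="vicon_noun_source", split="train", trust_remote_code=True)

    # SEACrowd pairs schema: id / text_1 / text_2 / label, with label in {"ANT", "SYN"}.
    adj_pairs = datasets.load_dataset("vicon.py", name="vicon_adj_seacrowd_pairs", split="train", trust_remote_code=True)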