holylovenia committed
Commit: a4ad37c
1 Parent(s): d14fbc6

Upload vilexnorm.py with huggingface_hub

Files changed (1):
  1. vilexnorm.py +176 -0
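
The commit message above says the file was pushed with huggingface_hub. For context, a minimal sketch of how such an upload is typically done with the huggingface_hub client (the repo_id below is a hypothetical illustration, not taken from this page):

    # Sketch: pushing a dataset script with huggingface_hub.
    # Assumes `huggingface-cli login` has been run; the repo_id is hypothetical.
    from huggingface_hub import HfApi

    api = HfApi()
    api.upload_file(
        path_or_fileobj="vilexnorm.py",  # local file to upload
        path_in_repo="vilexnorm.py",     # destination path inside the repo
        repo_id="SEACrowd/vilexnorm",    # hypothetical dataset repo id
        repo_type="dataset",
        commit_message="Upload vilexnorm.py with huggingface_hub",
    )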
vilexnorm.py ADDED
@@ -0,0 +1,176 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
The ViLexNorm corpus is a collection of comment pairs in Vietnamese, designed for the task of lexical normalization. The corpus contains 10,467 comment pairs, carefully curated and annotated for lexical normalization purposes.
These comment pairs are partitioned into three subsets: training, development, and test, distributed in an 8:1:1 ratio.
"""
from pathlib import Path
from typing import Dict, List, Tuple

import datasets
import pandas as pd

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """\
@inproceedings{nguyen-etal-2024-vilexnorm,
    title = "{V}i{L}ex{N}orm: A Lexical Normalization Corpus for {V}ietnamese Social Media Text",
    author = "Nguyen, Thanh-Nhi and
      Le, Thanh-Phong and
      Nguyen, Kiet",
    editor = "Graham, Yvette and
      Purver, Matthew",
    booktitle = "Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics (Volume 1: Long Papers)",
    month = mar,
    year = "2024",
    address = "St. Julian{'}s, Malta",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2024.eacl-long.85",
    pages = "1421--1437",
    abstract = "Lexical normalization, a fundamental task in Natural Language Processing (NLP), involves the transformation of words into their canonical forms. This process has been proven to benefit various downstream NLP tasks greatly. In this work, we introduce Vietnamese Lexical Normalization (ViLexNorm), the first-ever corpus developed for the Vietnamese lexical normalization task. The corpus comprises over 10,000 pairs of sentences meticulously annotated by human annotators, sourced from public comments on Vietnam{'}s most popular social media platforms. Various methods were used to evaluate our corpus, and the best-performing system achieved a result of 57.74% using the Error Reduction Rate (ERR) metric (van der Goot, 2019a) with the Leave-As-Is (LAI) baseline. For extrinsic evaluation, employing the model trained on ViLexNorm demonstrates the positive impact of the Vietnamese lexical normalization task on other NLP tasks. Our corpus is publicly available exclusively for research purposes.",
}
"""

_DATASETNAME = "vilexnorm"

_DESCRIPTION = """\
The ViLexNorm corpus is a collection of comment pairs in Vietnamese, designed for the task of lexical normalization. The corpus contains 10,467 comment pairs, carefully curated and annotated for lexical normalization purposes.
These comment pairs are partitioned into three subsets: training, development, and test, distributed in an 8:1:1 ratio.
"""

_HOMEPAGE = "https://github.com/ngxtnhi/ViLexNorm"

_LANGUAGES = ["vie"]

_LICENSE = Licenses.CC_BY_NC_SA_4_0.value

_LOCAL = False

_URLS = {
    "train": "https://raw.githubusercontent.com/ngxtnhi/ViLexNorm/main/data/train.csv",
    "dev": "https://raw.githubusercontent.com/ngxtnhi/ViLexNorm/main/data/dev.csv",
    "test": "https://raw.githubusercontent.com/ngxtnhi/ViLexNorm/main/data/test.csv",
}

_SUPPORTED_TASKS = [Tasks.MULTILEXNORM]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"


class VilexnormDataset(datasets.GeneratorBasedBuilder):
    """The ViLexNorm corpus is a collection of comment pairs in Vietnamese, designed for the task of lexical normalization. The corpus contains 10,467 comment pairs, carefully curated and annotated for lexical normalization purposes.
    These comment pairs are partitioned into three subsets: training, development, and test, distributed in an 8:1:1 ratio."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}",
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_t2t",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema="seacrowd_t2t",
            subset_id=f"{_DATASETNAME}",
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("int32"),
                    "original": datasets.Value("string"),
                    "normalized": datasets.Value("string"),
                }
            )
        elif self.config.schema == "seacrowd_t2t":
            features = schemas.text2text_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        data_dir = dl_manager.download_and_extract(_URLS)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_dir["train"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": data_dir["test"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": data_dir["dev"],
                },
            ),
        ]

    def _generate_examples(self, filepath: Path) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""
        df = pd.read_csv(filepath)

        if self.config.schema == "source":
            for i, row in df.iterrows():
                yield i, {
                    "id": i,
                    "original": row["original"],
                    "normalized": row["normalized"],
                }

        elif self.config.schema == "seacrowd_t2t":
            for i, row in df.iterrows():
                yield i, {
                    "id": str(i),
                    "text_1": row["original"],
                    "text_2": row["normalized"],
                    "text_1_name": "original",
                    "text_2_name": "normalized",
                }
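
For reference, a dataloader script like this is usually exercised through datasets.load_dataset pointed at the script file. A minimal usage sketch, assuming a local copy of vilexnorm.py (recent datasets releases also require trust_remote_code for script-based loaders):

    # Sketch: loading the source config from a local copy of the script.
    import datasets

    dataset = datasets.load_dataset(
        "vilexnorm.py",            # local path to the script above
        name="vilexnorm_source",   # default config; "vilexnorm_seacrowd_t2t" also exists
        trust_remote_code=True,
    )
    print(dataset["train"][0])     # e.g. {"id": 0, "original": "...", "normalized": "..."}

Each split is read from one of the CSVs in _URLS, so the 8:1:1 partition described in the docstring surfaces as the train, validation, and test splits.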