Languages: Filipino
holylovenia committed (verified)
Commit fe4a01a · 1 Parent(s): 9ac887e

Upload filwordnet.py with huggingface_hub
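For context, an upload like this one is typically performed with the huggingface_hub client. A minimal sketch follows; the repo_id is a placeholder, not taken from this page:

from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="filwordnet.py",       # local file to upload
    path_in_repo="filwordnet.py",          # destination path in the repo
    repo_id="<namespace>/filwordnet",      # placeholder repo id (assumption)
    repo_type="dataset",
    commit_message="Upload filwordnet.py with huggingface_hub",
)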

Files changed (1)
  1. filwordnet.py +146 -0
filwordnet.py ADDED
@@ -0,0 +1,146 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import csv
from pathlib import Path
from typing import Dict, List, Tuple

import datasets

from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses

_CITATION = """\
@article{article,
  author = {Borra, Allan and Pease, Adam and Roxas, Rachel Edita and Dita, Shirley},
  year = {2010},
  month = {01},
  pages = {},
  title = {Introducing Filipino WordNet}
}
"""

_DATASETNAME = "filwordnet"

_DESCRIPTION = """\
Filipino WordNet (FilWordNet) is a lexical database of the Filipino language.
It was derived from the Princeton WordNet and translated to Filipino by humans.
It documents 13,539 unique words and 9,519 synsets. Each synset includes the definition,
part-of-speech, word senses, and Suggested Upper Merged Ontology terms (SUMO terms).
"""

_HOMEPAGE = "https://github.com/danjohnvelasco/Filipino-WordNet"

_LANGUAGES = ["fil"]

_LICENSE = Licenses.UNKNOWN.value

_LOCAL = False

_URLS = {
    _DATASETNAME: "https://raw.githubusercontent.com/danjohnvelasco/Filipino-WordNet/main/filwordnet.csv",
}

_SUPPORTED_TASKS = []

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"


class FilWordNetDataset(datasets.GeneratorBasedBuilder):
    """The Filipino WordNet (FilWordNet) is a lexical database of the Filipino language containing 13,539 unique words and 9,519 synsets."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}",
        )
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "word_id": datasets.Value("int32"),
                    "lemma": datasets.Value("string"),
                    "synset_id": datasets.Value("int32"),
                    "sense_id": datasets.Value("int32"),
                    "pos": datasets.Value("string"),
                    "lexdomain_id": datasets.Value("int32"),
                    "definition": datasets.Value("string"),
                    "last_modifier": datasets.Value("int32"),
                    "sumo": datasets.Value("string"),
                }
            )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        urls = _URLS[_DATASETNAME]
        file = dl_manager.download_and_extract(urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": file,
                    "split": "train",
                },
            )
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""
        rows = []
        is_first_row = True
        with open(filepath, "r", encoding="utf-8") as file:
            csv_reader = csv.reader(file, delimiter=",")
            for row in csv_reader:
                if is_first_row:  # skip the first row; it holds the column names
                    is_first_row = False
                    continue

                rows.append(row)

        if self.config.schema == "source":
            for key, row in enumerate(rows):
                example = {
                    "word_id": row[0],
                    "lemma": row[1],
                    "synset_id": row[2],
                    "sense_id": row[3],
                    "pos": row[4],
                    "lexdomain_id": row[5],
                    "definition": row[6],
                    "last_modifier": row[7],
                    "sumo": row[8],
                }
                yield key, example
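For reference, a script like this is exercised through the standard datasets API. A minimal sketch, assuming the script is available locally and the seacrowd package it imports is installed; recent versions of the datasets library also require trust_remote_code=True for script-based datasets:

from datasets import load_dataset

# "filwordnet_source" matches DEFAULT_CONFIG_NAME in the script above.
dataset = load_dataset("filwordnet.py", name="filwordnet_source", trust_remote_code=True)

# Inspect one row of the source schema (word_id, lemma, synset_id, ...).
print(dataset["train"][0])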