Datasets:

ArXiv:
License:
holylovenia committed on
Commit
21be239
1 Parent(s): d3e763e

Upload filipino_words_aoa.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. filipino_words_aoa.py +130 -0
filipino_words_aoa.py ADDED
@@ -0,0 +1,130 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ from pathlib import Path
16
+ from typing import Dict, List, Tuple
17
+
18
+ import datasets
19
+ import pandas as pd
20
+
21
+ from seacrowd.utils import schemas
22
+ from seacrowd.utils.configs import SEACrowdConfig
23
+ from seacrowd.utils.constants import Licenses, Tasks
24
+
25
# Bibliographic reference for the age-of-acquisition study this dataset is derived from.
_CITATION = """
@techreport{dulaynag2021filaoa,
author = {Dulay, Katrina May and Nag, Somali},
title = {TalkTogether Age-of-Acquisition Word Lists for 885 Kannada and Filipino Words},
institution = {TalkTogether},
year = {2021},
type = {Technical Report},
url = {https://osf.io/gnjmr},
doi = {10.17605/OSF.IO/3ZDFN},
}
"""

# Data is fetched from a public URL (see _URL), not shipped with the loader.
_LOCAL = False
# Filipino headwords paired with English meanings.
_LANGUAGES = ["fil", "eng"]
_DATASETNAME = "filipino_words_aoa"
_DESCRIPTION = """\
The dataset contains 885 Filipino words derived from an age-of-acquisition participant study. The words are derived child-directed corpora
using pre-specified linguistic criteria. Each word in the corpora contains information about its meaning, part-of-speech (POS), age band,
morpheme count, syllable length, phoneme length, and the level of book it was derived from. The dataset can be used for lexical complexity
prediction, lexical simplification, and readability assessment research.
"""

_HOMEPAGE = "https://osf.io/3zdfn/"
_LICENSE = Licenses.CC_BY_SA_4_0.value
# Direct OSF download link for the Excel word list read in _generate_examples.
_URL = "https://osf.io/download/j42g7/"

# The fil->eng word/meaning pairs are exposed through the text-to-text (t2t) schema.
_SUPPORTED_TASKS = [Tasks.MACHINE_TRANSLATION]
_SOURCE_VERSION = "1.0.0"
_SEACROWD_VERSION = "2024.06.20"

56
class FilipinoWordsAOADataset(datasets.GeneratorBasedBuilder):
    """Loader for the TalkTogether Filipino age-of-acquisition word list.

    Each record is a Filipino word with its English meaning, part-of-speech tag,
    and AoA/length statistics. Exposed both as the raw source schema and as the
    SeaCrowd text-to-text (fil -> eng) schema.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=_DATASETNAME,
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_t2t",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SeaCrowd text-to-text schema",
            schema="seacrowd_t2t",
            subset_id=_DATASETNAME,
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
        """Return DatasetInfo whose features match the active config's schema."""
        if self.config.schema == "source":
            # One row per word; dtypes mirror the columns of the source spreadsheet.
            column_dtypes = {
                "word": "string",
                "meaning": "string",
                "POS_tag": "string",
                "mean_AoA": "float64",
                "mean_AoA_ageband": "string",
                "morpheme_count": "int64",
                "syllable_length": "int64",
                "phoneme_length": "int64",
                "book_ageband": "string",
            }
            features = datasets.Features({column: datasets.Value(dtype) for column, dtype in column_dtypes.items()})
        elif self.config.schema == "seacrowd_t2t":
            features = schemas.text2text_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download the word list and expose it as a single TRAIN split."""
        local_path = dl_manager.download(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": local_path},
            )
        ]

    def _generate_examples(self, filepath: Path) -> Tuple[int, Dict]:
        """Yield (key, example) pairs from the downloaded Excel sheet."""
        frame = pd.read_excel(filepath, index_col=None)
        for row_id, row in frame.iterrows():
            if self.config.schema == "source":
                # Source schema is a straight copy of the spreadsheet row.
                record = row.to_dict()
            elif self.config.schema == "seacrowd_t2t":
                # t2t schema pairs the Filipino word with its English meaning.
                record = {
                    "id": str(row_id),
                    "text_1": row["word"],
                    "text_2": row["meaning"],
                    "text_1_name": "fil",
                    "text_2_name": "eng",
                }
            yield row_id, record