Attempting to update to read xlsx file
temp_africaNLP_keyword_spotting_for_african_languages.py
CHANGED
@@ -1,3 +1,7 @@
+# Testing how to load rarfile from Zenodo.
+# https://github.com/huggingface/datasets/blob/dfdd2f949c1840926c02ae47f0f0c43083ef0b1f/datasets/common_voice/common_voice.py#L661 provided some inspiration
+# also https://huggingface.co/docs/datasets/master/dataset_script.html
+
 # coding=utf-8
 # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
 #
@@ -19,6 +23,7 @@ import csv
 import json
 import os
 import rarfile
+import pandas as pd

 import datasets

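The `import pandas as pd` line added here is what carries the xlsx support for the rest of the commit. A minimal sketch of the read it enables, assuming a local clips.xlsx whose columns match the ones the later hunks reference (sentence, path, bucket); note that pandas hands .xlsx parsing to an Excel engine such as openpyxl, which must be installed alongside it:

    import pandas as pd

    # Load the whole sheet into a DataFrame, one row per audio clip.
    clips_df = pd.read_excel("clips.xlsx")

    # The columns the loading script relies on, per the hunks below.
    print(clips_df[["sentence", "path", "bucket"]].head())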
@@ -129,10 +134,11 @@ class TempAfricaNLPKeywordSpottingForAfricanLanguages(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": os.path.join(data_dir, "
+                    "filepath": os.path.join(data_dir, "clips.xlsx"),
                     "split": "train",
                 },
             ),
+            """
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 # These kwargs will be passed to _generate_examples
@@ -149,6 +155,7 @@ class TempAfricaNLPKeywordSpottingForAfricanLanguages(datasets.GeneratorBasedBuilder):
                     "split": "dev",
                 },
             ),
+            """
         ]

     def _generate_examples(
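One caveat on the hunk above: the added """ pair does not comment out the TEST and DEV generators. Inside a list literal a triple-quoted string is an ordinary expression, so `_split_generators` now returns a two-element list whose second element is one large string instead of a `datasets.SplitGenerator`, and the datasets loader will trip over it downstream. A small sketch of the behavior, with placeholder strings standing in for the generators:

    # Inside a list literal, a string literal is a value, not a comment.
    splits = [
        "train_generator",
        """
        "test_generator",
        "dev_generator",
        """,
    ]
    print(len(splits))  # 2 -- the second element is the multi-line string itself

Deleting the two generators, or prefixing each of their lines with #, sidesteps the stray string.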
@@ -157,19 +164,13 @@ class TempAfricaNLPKeywordSpottingForAfricanLanguages(datasets.GeneratorBasedBuilder):
         """ Yields examples as (key, example) tuples. """
         # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
         # The `key` is here for legacy reason (tfds) and is not important in itself.
-
+        clips_df= pd.read_excel(filepath)
         with open(filepath, encoding="utf-8") as f:
-            for id_, row in
-                data =
-
-
-                        "sentence": data["sentence"],
-                        "option1": data["option1"],
-                        "answer": "" if split == "test" else data["answer"],
-                    }
-                else:
+            for id_, row in clips_df.iterrows()
+                data = row
+                bucket = row["bucket"]
+                if bucket == split:
                     yield id_, {
                         "sentence": data["sentence"],
-                        "
-
-                    }
+                        "path": data["path"],
+                    }
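As committed, this hunk would not run: `for id_, row in clips_df.iterrows()` is missing its trailing colon, and the retained `with open(filepath, encoding="utf-8") as f:` tries to decode the binary .xlsx as UTF-8 text even though `f` is never used afterwards. A corrected sketch of what the new body appears to intend; the column names come from the diff, the rest is assumption:

    import pandas as pd

    # Sketch of the builder method, not the committed code.
    def _generate_examples(self, filepath, split):
        """ Yields examples as (key, example) tuples. """
        clips_df = pd.read_excel(filepath)  # no text-mode open() of the .xlsx needed
        for id_, row in clips_df.iterrows():  # note the restored colon
            # "bucket" is assumed to hold the split name ("train", "test" or "dev");
            # rows from other splits are skipped.
            if row["bucket"] == split:
                yield id_, {
                    "sentence": row["sentence"],
                    "path": row["path"],
                }

Filtering up front with clips_df[clips_df["bucket"] == split] would work just as well; either way the keys stay unique because id_ comes from the full sheet's row index.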
|