Datasets:

Modalities:
Audio
Text
Libraries:
Datasets
License:
parquet-converter committed on
Commit
497fb4f
·
1 Parent(s): cbccc97

Update parquet files

Browse files
NINJAL-Ainu-Folklore.py DELETED
@@ -1,137 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2023 The HuggingFace Datasets Authors and the current dataset script contributor.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- """NINJAL Ainu folklore corpus"""
16
-
17
- import os
18
- import json
19
-
20
- import datasets
21
-
22
-
23
- _DESCRIPTION = ""
24
- _CITATION = ""
25
- _HOMEPAGE_URL = ""
26
-
27
- _BASE_PATH = "data/"
28
- _DATA_URL = _BASE_PATH + "audio/{split}.tar.gz"
29
- _META_URL = _BASE_PATH + "{split}.json"
30
-
31
-
32
class AinuFolkloreConfig(datasets.BuilderConfig):
    """BuilderConfig for the NINJAL Ainu folklore corpus."""

    def __init__(self, name, **kwargs):
        # The corpus carries no upstream version, so pin a placeholder one.
        version = datasets.Version("0.0.0", "")
        super().__init__(name=name, version=version, **kwargs)
35
-
36
-
37
class AinuFolklore(datasets.GeneratorBasedBuilder):
    """Builder for the NINJAL Ainu folklore speech corpus.

    Each example pairs a 16 kHz audio recording with its transcription and
    interlinear annotations (surface form, underlying form, gloss, free
    translation) plus a speaker identifier.
    """

    BUILDER_CONFIGS = [AinuFolkloreConfig("all")]

    def _info(self):
        """Return the dataset metadata (features, supervised keys, citation)."""
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "audio": datasets.features.Audio(sampling_rate=16_000),
                "transcription": datasets.Value("string"),
                "speaker": datasets.Value("string"),
                "surface": datasets.Value("string"),
                "underlying": datasets.Value("string"),
                "gloss": datasets.Value("string"),
                "translation": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=("audio", "transcription"),
            homepage=_HOMEPAGE_URL,
            citation=_CITATION,
            task_templates=None,
        )

    def _split_generators(self, dl_manager):
        """Download the audio archives and JSON metadata; define the splits."""
        splits = ["train", "dev", "test"]

        data_urls = {split: [_DATA_URL.format(split=split)] for split in splits}
        meta_urls = {split: [_META_URL.format(split=split)] for split in splits}

        archive_paths = dl_manager.download(data_urls)
        # In streaming mode archives are iterated directly, so nothing is
        # extracted and no local paths exist.
        local_extracted_archives = (
            dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {}
        )
        archive_iters = {
            split: [dl_manager.iter_archive(path) for path in paths]
            for split, paths in archive_paths.items()
        }

        meta_paths = dl_manager.download(meta_urls)

        # Map the repo's split directory names onto the canonical split names,
        # replacing three copy-pasted SplitGenerator constructions.
        split_names = {
            "train": datasets.Split.TRAIN,
            "dev": datasets.Split.VALIDATION,
            "test": datasets.Split.TEST,
        }
        return [
            datasets.SplitGenerator(
                name=split_names[split],
                gen_kwargs={
                    "local_extracted_archives": local_extracted_archives.get(
                        split, [None] * len(meta_paths.get(split))
                    ),
                    "archive_iters": archive_iters.get(split),
                    "text_paths": meta_paths.get(split),
                },
            )
            for split in splits
        ]

    def _generate_examples(self, local_extracted_archives, archive_iters, text_paths):
        """Yield (key, example) pairs joining archive audio with JSON metadata.

        Args:
            local_extracted_archives: per-archive local extraction directories,
                or ``None`` entries when streaming.
            archive_iters: iterators over the downloaded tar archives.
            text_paths: local paths of the per-split JSON metadata files.
        """
        assert len(local_extracted_archives) == len(archive_iters) == len(text_paths)
        key = 0

        for archive, text_path, local_extracted_path in zip(
            archive_iters, text_paths, local_extracted_archives
        ):
            with open(text_path, encoding="utf-8") as fin:
                data = json.load(fin)

            for audio_path, audio_file in archive:
                audio_filename = audio_path.split("/")[-1]
                # Archive members without a metadata entry are skipped.
                if audio_filename not in data:
                    continue

                result = data[audio_filename]
                # Bug fix: the extracted local path was computed but never
                # used. Prefer it (non-streaming mode) so the Audio feature
                # can reference a real file on disk; fall back to the
                # in-archive path when streaming.
                # NOTE(review): assumes archive members extract flat into the
                # extraction dir — confirm against the tarball layout.
                extracted_audio_path = (
                    os.path.join(local_extracted_path, audio_filename)
                    if local_extracted_path is not None
                    else None
                )
                result["audio"] = {
                    "path": extracted_audio_path or audio_path,
                    "bytes": audio_file.read(),
                }
                yield key, result
                key += 1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
README.md DELETED
@@ -1,34 +0,0 @@
1
- ---
2
- license: cc-by-sa-4.0
3
- ---
4
-
5
- # Dataset Card for NINJAL Ainu Folklore
6
-
7
- ## Dataset Description
8
-
9
- - **Original source** [A Glossed Audio Corpus of Ainu folklore](https://ainu.ninjal.ac.jp/folklore/en/)
10
-
11
- ### Dataset Summary
12
-
13
-
14
- ### Annotations
15
-
16
-
17
- ## Additional Information
18
-
19
- ### Limitations
20
-
21
- ### License
22
-
23
-
24
- ### Original Source
25
-
26
- ```
27
- @misc{ninjal-ainu-folklore,
28
- title={A Glossed Audio Corpus of Ainu Folklore},
29
- url={https://ainu.ninjal.ac.jp/folklore/},
30
- author={Nakagawa, Hiroshi and Bugaeva, Anna and Kobayashi, Miki and Yoshikawa, Yoshimi},
31
- publisher={The National Institute for Japanese Language and Linguistics ({NINJAL})},
32
- date={2016--2021}
33
- }
34
- ```
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
data/test.json → all/ninjal-ainu-folklore-test.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:eaa06bb3304c8442f572a339b63bd6ab3aa2b41bcdf406b498bd28918e87e6bf
3
- size 163427
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0ce437ca49b93b18b4311cff134e1bf79b926ab6806f21f195de2a64b315386b
3
+ size 48811516
data/audio/train.tar.gz → all/ninjal-ainu-folklore-train-00000-of-00002.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:2798fdee6cc7ea1f7c7eff5c1cbb31e0da8b8e32df4c6db2a0550650a580d820
3
- size 666473167
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bb00f0aec37a7a37a00dfdf49098a5b1dcf96b5ef8bb275019f3949e01ba4f23
3
+ size 598130772
data/audio/dev.tar.gz → all/ninjal-ainu-folklore-train-00001-of-00002.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:4a7a6593e92848ceb8cb1cf929772ef7e0ebf22a6fc7777c09ba481a795714a8
3
- size 37029441
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0e29285c079d4310799fe13189040bee2877d7ead25b572314fba1ee37e95b6e
3
+ size 246257230
data/dev.json → all/ninjal-ainu-folklore-validation.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:377ef480d07d62bb5990ae2325ff128cf7b65affb3e9f6204b3cb8208c5b7b2e
3
- size 164361
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e52b707cc24cac9ad0a9adaa68f42c6ecfdd0dd37318d2281bc3fbe6f2fda141
3
+ size 46884904
data/audio/test.tar.gz DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:89b438787d9c34798d63fb6f3b0f54d73cf53163f8b37b3fbcc02fe34338ba4a
3
- size 38398388
 
 
 
 
data/train.json DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:7521df1c0f863889f633bb09b3f04c5108381e5d091fdee9629a3f5827ee4c45
3
- size 2938624