Commit
•
d067682
0
Parent(s):
add initial files
Browse files- .gitignore +5 -0
- README.md +7 -0
- main.ipynb +104 -0
- nena_speech_1_0.py +0 -0
- requirements.txt +6 -0
.gitignore
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# python
|
2 |
+
venv
|
3 |
+
|
4 |
+
# macOS
|
5 |
+
.DS_Store
|
README.md
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# NENA Speech Dataset
|
2 |
+
|
3 |
+
## Context
|
4 |
+
The [Northeastern Neo-Aramaic (NENA) Database Project](https://nena.ames.cam.ac.uk/), led by Professor Geoffrey Khan, has been collecting language documentation materials for the NENA dialects. These materials include [a description of the dialect of the Assyrian Christians of Urmi](https://drive.google.com/file/d/1k7QXjjxakQN87c0p-SAcUwnxY_JbrKj9/view?usp=drive_link). This description contains 300 pages of transcribed and translated oral literature. These language documentation materials are [actively being parsed](https://github.com/mattynaz/nena-dataset-parsing) and then placed into a database at [pocketbase.nenadb.dev](https://pocketbase.nenadb.dev/_). The platform [assyrianspeech.com/chaldeanspeech.com/aramaicspeech.com](https://crowdsource.nenadb.dev/) allows the community to directly engage with these parsed examples and contribute their own voices to the database.
|
5 |
+
|
6 |
+
## Goal
|
7 |
+
The goal is to publish this dataset to [HuggingFace](https://huggingface.co/). Mozilla's [Common Voice dataset](https://huggingface.co/datasets/mozilla-foundation/common_voice_13_0/tree/main) provides an example implementation of such a dataset.
|
main.ipynb
ADDED
@@ -0,0 +1,104 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"cell_type": "markdown",
|
5 |
+
"metadata": {},
|
6 |
+
"source": [
|
7 |
+
"# Creating the NENA Speech Dataset"
|
8 |
+
]
|
9 |
+
},
|
10 |
+
{
|
11 |
+
"cell_type": "markdown",
|
12 |
+
"metadata": {},
|
13 |
+
"source": [
|
14 |
+
"Download validated examples from Pocketbase"
|
15 |
+
]
|
16 |
+
},
|
17 |
+
{
|
18 |
+
"cell_type": "code",
|
19 |
+
"execution_count": 1,
|
20 |
+
"metadata": {},
|
21 |
+
"outputs": [],
|
22 |
+
"source": [
|
23 |
+
"from pocketbase import PocketBase\n",
|
24 |
+
"\n",
|
25 |
+
"pb = PocketBase('https://pocketbase.nenadb.dev/')\n",
|
26 |
+
"\n",
|
27 |
+
"dialects = pb.collection(\"dialects\").get_full_list(query_params={\n",
|
28 |
+
" \"sort\": \"name\",\n",
|
29 |
+
"})\n",
|
30 |
+
"\n",
|
31 |
+
"examples = pb.collection(\"examples\").get_full_list(query_params={\n",
|
32 |
+
" \"expand\": \"dialect\",\n",
|
33 |
+
" \"filter\": \"validated=true\",\n",
|
34 |
+
"})"
|
35 |
+
]
|
36 |
+
},
|
37 |
+
{
|
38 |
+
"cell_type": "markdown",
|
39 |
+
"metadata": {},
|
40 |
+
"source": [
|
41 |
+
"Create shards"
|
42 |
+
]
|
43 |
+
},
|
44 |
+
{
|
45 |
+
"cell_type": "code",
|
46 |
+
"execution_count": null,
|
47 |
+
"metadata": {},
|
48 |
+
"outputs": [],
|
49 |
+
"source": [
|
50 |
+
"from pydub import AudioSegment\n",
|
51 |
+
"import requests\n",
|
52 |
+
"import tempfile\n",
|
53 |
+
"\n",
|
54 |
+
"test_split = 0.10\n",
|
55 |
+
"dev_split = 0.10\n",
|
56 |
+
"\n",
|
57 |
+
"for i, example in enumerate(examples):\n",
|
58 |
+
" prog = i / len(examples)\n",
|
59 |
+
" \n",
|
60 |
+
" if prog < test_split:\n",
|
61 |
+
" split = 'test'\n",
|
62 |
+
" elif prog < dev_split + test_split:\n",
|
63 |
+
" split = 'dev'\n",
|
64 |
+
" else:\n",
|
65 |
+
" split = 'train'\n",
|
66 |
+
"\n",
|
67 |
+
" audio_url = pb.get_file_url(example, example.speech, {})\n",
|
68 |
+
" response = requests.get(audio_url)\n",
|
69 |
+
"\n",
|
70 |
+
" with tempfile.NamedTemporaryFile() as f:\n",
|
71 |
+
" f.write(response.content)\n",
|
72 |
+
" f.flush()\n",
|
73 |
+
" audio = AudioSegment.from_file(f.name)\n",
|
74 |
+
"\n",
|
75 |
+
" audio = audio.set_frame_rate(48000)\n",
|
76 |
+
" audio.export(f\"nena_speech_{example.id}.mp3\", format=\"mp3\")\n",
|
77 |
+
"\n",
|
78 |
+
" break"
|
79 |
+
]
|
80 |
+
}
|
81 |
+
],
|
82 |
+
"metadata": {
|
83 |
+
"kernelspec": {
|
84 |
+
"display_name": "venv",
|
85 |
+
"language": "python",
|
86 |
+
"name": "python3"
|
87 |
+
},
|
88 |
+
"language_info": {
|
89 |
+
"codemirror_mode": {
|
90 |
+
"name": "ipython",
|
91 |
+
"version": 3
|
92 |
+
},
|
93 |
+
"file_extension": ".py",
|
94 |
+
"mimetype": "text/x-python",
|
95 |
+
"name": "python",
|
96 |
+
"nbconvert_exporter": "python",
|
97 |
+
"pygments_lexer": "ipython3",
|
98 |
+
"version": "3.11.5"
|
99 |
+
},
|
100 |
+
"orig_nbformat": 4
|
101 |
+
},
|
102 |
+
"nbformat": 4,
|
103 |
+
"nbformat_minor": 2
|
104 |
+
}
|
nena_speech_1_0.py
ADDED
File without changes
|
requirements.txt
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
torchaudio
|
2 |
+
torch
|
3 |
+
pocketbase
|
4 |
+
datasets
|
5 |
+
pydub
|
6 |
+
requests
|