mnazari commited on
Commit
3428b9a
1 Parent(s): a5013d4

add dataset builder

Browse files
Files changed (5) hide show
  1. README.md +17 -1
  2. build.py +118 -0
  3. main.ipynb +136 -31
  4. nena_speech_1_0.py +11 -0
  5. requirements.txt +4 -3
README.md CHANGED
@@ -4,4 +4,20 @@
4
  The [Northeastern Neo-Aramaic (NENA) Database Project](https://nena.ames.cam.ac.uk/) has been aggregating language documentation materials for the NENA dialects. These materials include descriptions such as [a description of the dialect of the Assyrian Christians of Urmi](https://drive.google.com/file/d/1k7QXjjxakQN87c0p-SAcUwnxY_JbrKj9/view?usp=drive_link), which contains 300 pages (8 hours) of transcribed and translated oral literature. These texts are [actively being parsed](https://github.com/mattynaz/nena-dataset-parsing) and uploaded to a database at [pocketbase.nenadb.dev](https://pocketbase.nenadb.dev/_). The platform [crowdsource.nenadb.dev](https://crowdsource.nenadb.dev/) allows the community to engage directly with the parsed examples and contribute their own voices to the database.
5
 
6
  ## Goal
7
- The goal is to publish this dataset to [HuggingFace](https://huggingface.co/). Mozilla's [Common Voice dataset](https://huggingface.co/datasets/mozilla-foundation/common_voice_13_0/tree/main) provides an example implementation of such a dataset.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
  The [Northeastern Neo-Aramaic (NENA) Database Project](https://nena.ames.cam.ac.uk/) has been aggregating language documentation materials for the NENA dialects. These materials include descriptions, including [a description of the dialect of the Assyrian Christians of Urmi](https://drive.google.com/file/d/1k7QXjjxakQN87c0p-SAcUwnxY_JbrKj9/view?usp=drive_link). This description contains 300 pages (8 hours) of transcribed and translated oral literature. These oral literatures are [actively being parsed](https://github.com/mattynaz/nena-dataset-parsing) and uploaded to a database at [pocketbase.nenadb.dev](https://pocketbase.nenadb.dev/_). The platform [crowdsource.nenadb.dev](https://crowdsource.nenadb.dev/) allows the community to directly engage with these parsed examples and contribute their own voices to the database.
5
 
6
  ## Goal
7
+ The goal is to publish this dataset to [HuggingFace](https://huggingface.co/). Mozilla's [Common Voice dataset](https://huggingface.co/datasets/mozilla-foundation/common_voice_13_0/tree/main) provides an example implementation of such a dataset.
8
+
9
+ ## Development
10
+
11
+ ### Building the dataset
12
+
13
+ Install the required packages.
14
+
15
+ ```
16
+ pip install -r requirements.txt
17
+ ```
18
+
19
+ Build the dataset.
20
+
21
+ ```
22
+ python build.py --build
23
+ ```
build.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import csv
3
+ import os
4
+ import shutil
5
+ import tarfile
6
+ import tempfile
7
+ from tqdm import tqdm
8
+
9
+ from pydub import AudioSegment
10
+ import requests
11
+ from pocketbase import PocketBase
12
+
13
# NOTE(review): this module-level parser is never used — the __main__ guard
# below builds its own ArgumentParser. Kept as-is for compatibility; consider
# removing it.
parser = argparse.ArgumentParser(description="Command description.")

# Shared PocketBase client for the NENA database; read by get_examples() and
# save_data().
pb = PocketBase('https://pocketbase.nenadb.dev/')
16
+
17
def get_examples():
    """Fetch every validated example from PocketBase.

    Each record is fetched with its ``dialect`` relation expanded so that
    downstream code can read ``example.expand['dialect']``.
    """
    query = {
        "expand": "dialect",
        "filter": "validated=true",
    }
    return pb.collection("examples").get_full_list(query_params=query)
24
+
25
def split_examples(examples, test_split=0.10, dev_split=0.10):
    """Bucket examples by dialect, then partition each dialect into splits.

    Within each dialect (taken in encounter order, no shuffling), the first
    ``test_split`` fraction of examples goes to ``'test'``, the next
    ``dev_split`` fraction to ``'dev'``, and the remainder to ``'train'``.

    Returns a dict of the form ``{dialect_name: {split_name: [examples]}}``;
    split keys are only present when they received at least one example.
    """
    # Group examples by (lower-cased) dialect name, preserving order.
    by_dialect = {}
    for example in examples:
        name = example.expand['dialect'].name.lower()
        by_dialect.setdefault(name, []).append(example)

    subsets = {}
    for name, bucket in by_dialect.items():
        splits = {}
        total = len(bucket)
        for index, example in enumerate(bucket):
            fraction = index / total
            if fraction < test_split:
                which = 'test'
            elif fraction < test_split + dev_split:
                which = 'dev'
            else:
                which = 'train'
            splits.setdefault(which, []).append(example)
        subsets[name] = splits

    return subsets
52
+
53
def save_data(subsets):
    """Download, transcode, and archive audio; write per-split transcripts.

    For each (dialect, split) pair in *subsets* this:
      1. downloads each example's speech file from PocketBase,
      2. resamples it to 48 kHz and exports it as MP3,
      3. bundles the split's MP3s into ``audio/<dialect>/<split>.tar``,
      4. writes ``transcript/<dialect>/<split>.tsv`` with one row per example.

    The un-tarred audio directory is deleted after archiving. Raises
    ``requests.HTTPError`` if any audio download fails.
    """
    # Fixed column order for the TSV files; also makes writing a header-only
    # file possible if a split is unexpectedly empty (the original indexed
    # transcripts[0] and would crash).
    fieldnames = ['age', 'transcription', 'translation', 'path']

    total_examples = sum(
        sum(len(split) for split in subset.values())
        for subset in subsets.values()
    )

    with tqdm(total=total_examples) as pbar:
        for dialect, subset in subsets.items():
            for split, examples in subset.items():
                audio_dir_path = os.path.join("audio", dialect, split)
                os.makedirs(audio_dir_path, exist_ok=True)

                transcripts = []
                transcript_dir_path = os.path.join("transcript", dialect)
                os.makedirs(transcript_dir_path, exist_ok=True)

                for example in examples:
                    pbar.set_description(f"Downloading audios ({dialect} / {split})")
                    pbar.update(1)
                    audio_url = pb.get_file_url(example, example.speech, {})
                    response = requests.get(audio_url)
                    # Fail fast on a bad download instead of handing an HTML
                    # error page to the audio decoder.
                    response.raise_for_status()
                    with tempfile.NamedTemporaryFile() as f:
                        f.write(response.content)
                        f.flush()
                        audio = AudioSegment.from_file(f.name)
                        audio = audio.set_frame_rate(48000)  # normalize sample rate
                        audio_file_name = f"nena_speech_{example.id}.mp3"
                        audio_file_path = os.path.join(audio_dir_path, audio_file_name)
                        audio.export(audio_file_path, format="mp3")

                    transcripts.append({
                        'age': example.age,
                        'transcription': example.transcription,
                        'translation': example.translation,
                        'path': audio_file_name,
                    })

                pbar.set_description(f"Saving audios ({dialect}/{split})")
                audio_tar_path = f"{audio_dir_path}.tar"
                with tarfile.open(audio_tar_path, 'w') as tar:
                    tar.add(audio_dir_path, arcname=os.path.basename(audio_dir_path))

                pbar.set_description(f"Saving transcripts ({dialect} / {split})")
                with open(os.path.join(transcript_dir_path, f"{split}.tsv"), 'w', newline='') as f:
                    writer = csv.DictWriter(f, fieldnames=fieldnames, delimiter='\t')
                    writer.writeheader()
                    writer.writerows(transcripts)

                # Loose MP3s are now inside the tar; remove the directory.
                shutil.rmtree(audio_dir_path)
102
+
103
+
104
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Build the NENA Speech dataset.")

    parser.add_argument(
        "-b",
        "--build",
        action="store_true",
        help="Download validated examples and build the audio/transcript dataset",
    )

    args = parser.parse_args()

    # Only build when explicitly requested. Previously the --build flag was
    # parsed but never checked, so the (long) build ran unconditionally;
    # the README documents `python build.py --build` as the entry point.
    if args.build:
        examples = get_examples()
        subsets = split_examples(examples)
        save_data(subsets)
main.ipynb CHANGED
@@ -16,22 +16,81 @@
16
  },
17
  {
18
  "cell_type": "code",
19
- "execution_count": 1,
20
  "metadata": {},
21
  "outputs": [],
22
  "source": [
23
  "from pocketbase import PocketBase\n",
24
  "\n",
25
- "pb = PocketBase('https://pocketbase.nenadb.dev/')\n",
 
26
  "\n",
27
- "dialects = pb.collection(\"dialects\").get_full_list(query_params={\n",
28
- " \"sort\": \"name\",\n",
29
- "})\n",
 
30
  "\n",
31
- "examples = pb.collection(\"examples\").get_full_list(query_params={\n",
32
- " \"expand\": \"dialect\",\n",
33
- " \"filter\": \"validated=true\",\n",
34
- "})"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
35
  ]
36
  },
37
  {
@@ -43,39 +102,85 @@
43
  },
44
  {
45
  "cell_type": "code",
46
- "execution_count": null,
47
  "metadata": {},
48
  "outputs": [],
49
  "source": [
50
  "from pydub import AudioSegment\n",
51
  "import requests\n",
52
  "import tempfile\n",
 
 
 
 
53
  "\n",
54
- "test_split = 0.10\n",
55
- "dev_split = 0.10\n",
56
- "\n",
57
- "for i, example in enumerate(examples):\n",
58
- " prog = i / len(examples)\n",
59
- " \n",
60
- " if prog < test_split:\n",
61
- " split = 'test'\n",
62
- " elif prog < dev_split + test_split:\n",
63
- " split = 'dev'\n",
64
- " else:\n",
65
- " split = 'train'\n",
66
  "\n",
67
- " audio_url = pb.get_file_url(example, example.speech, {})\n",
68
- " response = requests.get(audio_url)\n",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
69
  "\n",
70
- " with tempfile.NamedTemporaryFile() as f:\n",
71
- " f.write(response.content)\n",
72
- " f.flush()\n",
73
- " audio = AudioSegment.from_file(f.name)\n",
74
  "\n",
75
- " audio = audio.set_frame_rate(48000)\n",
76
- " audio.export(f\"nena_speech_{example.id}.mp3\", format=\"mp3\")\n",
 
 
77
  "\n",
78
- " break"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
79
  ]
80
  }
81
  ],
 
16
  },
17
  {
18
  "cell_type": "code",
19
+ "execution_count": 8,
20
  "metadata": {},
21
  "outputs": [],
22
  "source": [
23
  "from pocketbase import PocketBase\n",
24
  "\n",
25
+ "def get_examples():\n",
26
+ " pb = PocketBase('https://pocketbase.nenadb.dev/')\n",
27
  "\n",
28
+ " examples = pb.collection(\"examples\").get_full_list(query_params={\n",
29
+ " \"expand\": \"dialect\",\n",
30
+ " \"filter\": \"validated=true\",\n",
31
+ " })\n",
32
  "\n",
33
+ " return examples"
34
+ ]
35
+ },
36
+ {
37
+ "cell_type": "code",
38
+ "execution_count": 9,
39
+ "metadata": {},
40
+ "outputs": [],
41
+ "source": [
42
+ "examples = get_examples()"
43
+ ]
44
+ },
45
+ {
46
+ "cell_type": "markdown",
47
+ "metadata": {},
48
+ "source": [
49
+ "Bucket examples into subsets"
50
+ ]
51
+ },
52
+ {
53
+ "cell_type": "code",
54
+ "execution_count": 10,
55
+ "metadata": {},
56
+ "outputs": [],
57
+ "source": [
58
+ "def split_examples(examples, test_split=0.10, dev_split=0.10):\n",
59
+ " subsets = {}\n",
60
+ "\n",
61
+ " for example in examples:\n",
62
+ " dialect = example.expand['dialect'].name.lower()\n",
63
+ " if not subsets.get(dialect):\n",
64
+ " subsets[dialect] = { 'all': [] }\n",
65
+ " subsets[dialect]['all'].append(example)\n",
66
+ "\n",
67
+ " for subset in subsets.values():\n",
68
+ " for i, example in enumerate(subset['all']):\n",
69
+ " prog = i / len(subset['all'])\n",
70
+ "\n",
71
+ " if prog < test_split:\n",
72
+ " split = 'test'\n",
73
+ " elif prog < dev_split + test_split:\n",
74
+ " split = 'dev'\n",
75
+ " else:\n",
76
+ " split = 'train'\n",
77
+ "\n",
78
+ " if not subset.get(split):\n",
79
+ " subset[split] = []\n",
80
+ " subset[split].append(example)\n",
81
+ " \n",
82
+ " del subset['all']\n",
83
+ "\n",
84
+ " return subsets"
85
+ ]
86
+ },
87
+ {
88
+ "cell_type": "code",
89
+ "execution_count": 11,
90
+ "metadata": {},
91
+ "outputs": [],
92
+ "source": [
93
+ "subsets = split_examples(examples)"
94
  ]
95
  },
96
  {
 
102
  },
103
  {
104
  "cell_type": "code",
105
+ "execution_count": 24,
106
  "metadata": {},
107
  "outputs": [],
108
  "source": [
109
  "from pydub import AudioSegment\n",
110
  "import requests\n",
111
  "import tempfile\n",
112
+ "import tarfile\n",
113
+ "import shutil\n",
114
+ "import os\n",
115
+ "import csv\n",
116
  "\n",
117
+ "def save_data(subsets):\n",
118
+ " for dialect, subset in subsets.items():\n",
119
+ " for split, examples in subset.items():\n",
120
+ " audio_dir_path = os.path.join(\"audio\", dialect, split)\n",
121
+ " os.makedirs(audio_dir_path, exist_ok=True)\n",
 
 
 
 
 
 
 
122
  "\n",
123
+ " transcripts = []\n",
124
+ " transcript_dir_path = os.path.join(\"transcript\", dialect)\n",
125
+ " os.makedirs(transcript_dir_path, exist_ok=True)\n",
126
+ " \n",
127
+ " for example in examples:\n",
128
+ " pb = PocketBase('https://pocketbase.nenadb.dev/')\n",
129
+ " audio_url = pb.get_file_url(example, example.speech, {})\n",
130
+ " response = requests.get(audio_url)\n",
131
+ " with tempfile.NamedTemporaryFile() as f:\n",
132
+ " f.write(response.content)\n",
133
+ " f.flush()\n",
134
+ " audio = AudioSegment.from_file(f.name)\n",
135
+ " audio = audio.set_frame_rate(48000)\n",
136
+ " audio_file_name = f\"nena_speech_{example.id}.mp3\"\n",
137
+ " audio_file_path = os.path.join(audio_dir_path, audio_file_name)\n",
138
+ " audio.export(audio_file_path, format=\"mp3\")\n",
139
+ " \n",
140
+ " transcripts.append({\n",
141
+ " 'age': example.age,\n",
142
+ " 'transcription': example.transcription,\n",
143
+ " 'translation': example.translation,\n",
144
+ " 'path': audio_file_name,\n",
145
+ " })\n",
146
  "\n",
147
+ " audio_tar_path = f\"{audio_dir_path}.tar\"\n",
148
+ " with tarfile.open(audio_tar_path, 'w') as tar:\n",
149
+ " tar.add(audio_dir_path, arcname=os.path.basename(audio_dir_path))\n",
 
150
  "\n",
151
+ " with open(os.path.join(transcript_dir_path, f\"{split}.tsv\"), 'w', newline='') as f:\n",
152
+ " writer = csv.DictWriter(f, fieldnames=transcripts[0].keys(), delimiter='\\t')\n",
153
+ " writer.writeheader()\n",
154
+ " writer.writerows(transcripts)\n",
155
  "\n",
156
+ " shutil.rmtree(audio_dir_path)"
157
+ ]
158
+ },
159
+ {
160
+ "cell_type": "code",
161
+ "execution_count": 25,
162
+ "metadata": {},
163
+ "outputs": [
164
+ {
165
+ "ename": "KeyboardInterrupt",
166
+ "evalue": "",
167
+ "output_type": "error",
168
+ "traceback": [
169
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
170
+ "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
171
+ "\u001b[1;32m/Users/matthew/Documents/nenadb/dataloader/main.ipynb Cell 10\u001b[0m line \u001b[0;36m1\n\u001b[0;32m----> <a href='vscode-notebook-cell:/Users/matthew/Documents/nenadb/dataloader/main.ipynb#X10sZmlsZQ%3D%3D?line=0'>1</a>\u001b[0m save_data(subsets)\n",
172
+ "\u001b[1;32m/Users/matthew/Documents/nenadb/dataloader/main.ipynb Cell 10\u001b[0m line \u001b[0;36m2\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/matthew/Documents/nenadb/dataloader/main.ipynb#X10sZmlsZQ%3D%3D?line=23'>24</a>\u001b[0m f\u001b[39m.\u001b[39mwrite(response\u001b[39m.\u001b[39mcontent)\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/matthew/Documents/nenadb/dataloader/main.ipynb#X10sZmlsZQ%3D%3D?line=24'>25</a>\u001b[0m f\u001b[39m.\u001b[39mflush()\n\u001b[0;32m---> <a href='vscode-notebook-cell:/Users/matthew/Documents/nenadb/dataloader/main.ipynb#X10sZmlsZQ%3D%3D?line=25'>26</a>\u001b[0m audio \u001b[39m=\u001b[39m AudioSegment\u001b[39m.\u001b[39;49mfrom_file(f\u001b[39m.\u001b[39;49mname)\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/matthew/Documents/nenadb/dataloader/main.ipynb#X10sZmlsZQ%3D%3D?line=26'>27</a>\u001b[0m audio \u001b[39m=\u001b[39m audio\u001b[39m.\u001b[39mset_frame_rate(\u001b[39m48000\u001b[39m)\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/matthew/Documents/nenadb/dataloader/main.ipynb#X10sZmlsZQ%3D%3D?line=27'>28</a>\u001b[0m audio_file_name \u001b[39m=\u001b[39m \u001b[39mf\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mnena_speech_\u001b[39m\u001b[39m{\u001b[39;00mexample\u001b[39m.\u001b[39mid\u001b[39m}\u001b[39;00m\u001b[39m.mp3\u001b[39m\u001b[39m\"\u001b[39m\n",
173
+ "File \u001b[0;32m~/Documents/nenadb/dataloader/venv/lib/python3.11/site-packages/pydub/audio_segment.py:728\u001b[0m, in \u001b[0;36mAudioSegment.from_file\u001b[0;34m(cls, file, format, codec, parameters, start_second, duration, **kwargs)\u001b[0m\n\u001b[1;32m 726\u001b[0m info \u001b[39m=\u001b[39m \u001b[39mNone\u001b[39;00m\n\u001b[1;32m 727\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[0;32m--> 728\u001b[0m info \u001b[39m=\u001b[39m mediainfo_json(orig_file, read_ahead_limit\u001b[39m=\u001b[39;49mread_ahead_limit)\n\u001b[1;32m 729\u001b[0m \u001b[39mif\u001b[39;00m info:\n\u001b[1;32m 730\u001b[0m audio_streams \u001b[39m=\u001b[39m [x \u001b[39mfor\u001b[39;00m x \u001b[39min\u001b[39;00m info[\u001b[39m'\u001b[39m\u001b[39mstreams\u001b[39m\u001b[39m'\u001b[39m]\n\u001b[1;32m 731\u001b[0m \u001b[39mif\u001b[39;00m x[\u001b[39m'\u001b[39m\u001b[39mcodec_type\u001b[39m\u001b[39m'\u001b[39m] \u001b[39m==\u001b[39m \u001b[39m'\u001b[39m\u001b[39maudio\u001b[39m\u001b[39m'\u001b[39m]\n",
174
+ "File \u001b[0;32m~/Documents/nenadb/dataloader/venv/lib/python3.11/site-packages/pydub/utils.py:275\u001b[0m, in \u001b[0;36mmediainfo_json\u001b[0;34m(filepath, read_ahead_limit)\u001b[0m\n\u001b[1;32m 273\u001b[0m command \u001b[39m=\u001b[39m [prober, \u001b[39m'\u001b[39m\u001b[39m-of\u001b[39m\u001b[39m'\u001b[39m, \u001b[39m'\u001b[39m\u001b[39mjson\u001b[39m\u001b[39m'\u001b[39m] \u001b[39m+\u001b[39m command_args\n\u001b[1;32m 274\u001b[0m res \u001b[39m=\u001b[39m Popen(command, stdin\u001b[39m=\u001b[39mstdin_parameter, stdout\u001b[39m=\u001b[39mPIPE, stderr\u001b[39m=\u001b[39mPIPE)\n\u001b[0;32m--> 275\u001b[0m output, stderr \u001b[39m=\u001b[39m res\u001b[39m.\u001b[39;49mcommunicate(\u001b[39minput\u001b[39;49m\u001b[39m=\u001b[39;49mstdin_data)\n\u001b[1;32m 276\u001b[0m output \u001b[39m=\u001b[39m output\u001b[39m.\u001b[39mdecode(\u001b[39m\"\u001b[39m\u001b[39mutf-8\u001b[39m\u001b[39m\"\u001b[39m, \u001b[39m'\u001b[39m\u001b[39mignore\u001b[39m\u001b[39m'\u001b[39m)\n\u001b[1;32m 277\u001b[0m stderr \u001b[39m=\u001b[39m stderr\u001b[39m.\u001b[39mdecode(\u001b[39m\"\u001b[39m\u001b[39mutf-8\u001b[39m\u001b[39m\"\u001b[39m, \u001b[39m'\u001b[39m\u001b[39mignore\u001b[39m\u001b[39m'\u001b[39m)\n",
175
+ "File \u001b[0;32m/opt/homebrew/Cellar/python@3.11/3.11.5/Frameworks/Python.framework/Versions/3.11/lib/python3.11/subprocess.py:1209\u001b[0m, in \u001b[0;36mPopen.communicate\u001b[0;34m(self, input, timeout)\u001b[0m\n\u001b[1;32m 1206\u001b[0m endtime \u001b[39m=\u001b[39m \u001b[39mNone\u001b[39;00m\n\u001b[1;32m 1208\u001b[0m \u001b[39mtry\u001b[39;00m:\n\u001b[0;32m-> 1209\u001b[0m stdout, stderr \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_communicate(\u001b[39minput\u001b[39;49m, endtime, timeout)\n\u001b[1;32m 1210\u001b[0m \u001b[39mexcept\u001b[39;00m \u001b[39mKeyboardInterrupt\u001b[39;00m:\n\u001b[1;32m 1211\u001b[0m \u001b[39m# https://bugs.python.org/issue25942\u001b[39;00m\n\u001b[1;32m 1212\u001b[0m \u001b[39m# See the detailed comment in .wait().\u001b[39;00m\n\u001b[1;32m 1213\u001b[0m \u001b[39mif\u001b[39;00m timeout \u001b[39mis\u001b[39;00m \u001b[39mnot\u001b[39;00m \u001b[39mNone\u001b[39;00m:\n",
176
+ "File \u001b[0;32m/opt/homebrew/Cellar/python@3.11/3.11.5/Frameworks/Python.framework/Versions/3.11/lib/python3.11/subprocess.py:2108\u001b[0m, in \u001b[0;36mPopen._communicate\u001b[0;34m(self, input, endtime, orig_timeout)\u001b[0m\n\u001b[1;32m 2101\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_check_timeout(endtime, orig_timeout,\n\u001b[1;32m 2102\u001b[0m stdout, stderr,\n\u001b[1;32m 2103\u001b[0m skip_check_and_raise\u001b[39m=\u001b[39m\u001b[39mTrue\u001b[39;00m)\n\u001b[1;32m 2104\u001b[0m \u001b[39mraise\u001b[39;00m \u001b[39mRuntimeError\u001b[39;00m( \u001b[39m# Impossible :)\u001b[39;00m\n\u001b[1;32m 2105\u001b[0m \u001b[39m'\u001b[39m\u001b[39m_check_timeout(..., skip_check_and_raise=True) \u001b[39m\u001b[39m'\u001b[39m\n\u001b[1;32m 2106\u001b[0m \u001b[39m'\u001b[39m\u001b[39mfailed to raise TimeoutExpired.\u001b[39m\u001b[39m'\u001b[39m)\n\u001b[0;32m-> 2108\u001b[0m ready \u001b[39m=\u001b[39m selector\u001b[39m.\u001b[39;49mselect(timeout)\n\u001b[1;32m 2109\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_check_timeout(endtime, orig_timeout, stdout, stderr)\n\u001b[1;32m 2111\u001b[0m \u001b[39m# XXX Rewrite these to use non-blocking I/O on the file\u001b[39;00m\n\u001b[1;32m 2112\u001b[0m \u001b[39m# objects; they are no longer using C stdio!\u001b[39;00m\n",
177
+ "File \u001b[0;32m/opt/homebrew/Cellar/python@3.11/3.11.5/Frameworks/Python.framework/Versions/3.11/lib/python3.11/selectors.py:415\u001b[0m, in \u001b[0;36m_PollLikeSelector.select\u001b[0;34m(self, timeout)\u001b[0m\n\u001b[1;32m 413\u001b[0m ready \u001b[39m=\u001b[39m []\n\u001b[1;32m 414\u001b[0m \u001b[39mtry\u001b[39;00m:\n\u001b[0;32m--> 415\u001b[0m fd_event_list \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_selector\u001b[39m.\u001b[39mpoll(timeout)\n\u001b[1;32m 416\u001b[0m \u001b[39mexcept\u001b[39;00m \u001b[39mInterruptedError\u001b[39;00m:\n\u001b[1;32m 417\u001b[0m \u001b[39mreturn\u001b[39;00m ready\n",
178
+ "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
179
+ ]
180
+ }
181
+ ],
182
+ "source": [
183
+ "save_data(subsets)"
184
  ]
185
  }
186
  ],
nena_speech_1_0.py CHANGED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ NENA Speech Dataset"""
2
+
3
+
4
+ import datasets
5
+
6
class NENASpeechConfig(datasets.BuilderConfig):
    """BuilderConfig for NENASpeech.

    Placeholder: no fields beyond those of ``datasets.BuilderConfig`` are
    defined yet — presumably per-dialect configs will be added here.
    """
    pass
9
+
10
class NENASpeech(datasets.GeneratorBasedBuilder):
    """Skeleton Hugging Face dataset builder for NENA Speech.

    TODO: implement ``_info``, ``_split_generators``, and
    ``_generate_examples`` to load the audio tars and TSV transcripts
    produced by build.py.
    """
    pass
requirements.txt CHANGED
@@ -1,6 +1,7 @@
1
- torchaudio
2
- torch
3
- pocketbase
4
  datasets
 
5
  pydub
6
  requests
 
 
 
 
 
 
 
1
  datasets
2
+ pocketbase
3
  pydub
4
  requests
5
+ torch
6
+ torchaudio
7
+ tqdm