{
"cells": [
{
"cell_type": "code",
"execution_count": 46,
"id": "18e8d8ed",
"metadata": {},
"outputs": [],
"source": [
"import textgrid\n",
"import re\n",
"import soundfile as sf\n",
"from glob import glob\n",
"from transformers import AutoTokenizer\n",
"\n",
"tokenizer = AutoTokenizer.from_pretrained('openai/whisper-large-v3')"
]
},
{
"cell_type": "code",
"execution_count": 22,
"id": "ebf7bbf7",
"metadata": {},
"outputs": [],
"source": [
"timestamps = [i * 0.02 for i in range(1500 + 1)]"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "93085eb5",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"37"
]
},
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"files = glob('TextGrid/*.TextGrid')\n",
"len(files)"
]
},
{
"cell_type": "code",
"execution_count": 127,
"id": "4e6a5405",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
"To disable this warning, you can either:\n",
"\t- Avoid using `tokenizers` before the fork if possible\n",
"\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n",
"huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
"To disable this warning, you can either:\n",
"\t- Avoid using `tokenizers` before the fork if possible\n",
"\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
]
}
],
"source": [
"!rm -rf audio\n",
"!mkdir audio"
]
},
{
"cell_type": "code",
"execution_count": 128,
"id": "a6eb80d6",
"metadata": {},
"outputs": [],
"source": [
"import textgrid\n",
"\n",
"alls = []\n",
"for f in files:\n",
" wav_file = f.replace('TextGrid/', 'wav/').replace('.TextGrid', '.wav')\n",
" y, sr = sf.read(wav_file)\n",
" tg = textgrid.TextGrid.fromFile(f)\n",
" \n",
" grids = []\n",
" for tg_ in tg[:2]:\n",
" for s in tg_:\n",
" grids.append(s)\n",
" grids = sorted(grids, key = lambda x: x.minTime)\n",
" \n",
" segments, temp = [], []\n",
" temp_length = 0\n",
" for s in grids:\n",
" start = s.minTime\n",
" end = s.maxTime\n",
" text = s.mark\n",
" duration = end - start\n",
" if duration + temp_length >= 30:\n",
" segments.append(temp)\n",
" temp = []\n",
" temp_length = 0\n",
" else:\n",
" temp.append(s)\n",
" temp_length += duration\n",
" \n",
" if len(temp):\n",
" segments.append(temp)\n",
" \n",
" for no, s in enumerate(segments):\n",
" accept = 0\n",
" for s_ in s:\n",
" text = s_.mark\n",
" if len(text) > 2:\n",
" accept += 1\n",
"\n",
" if accept < 1:\n",
" continue\n",
"\n",
" start_at = s[0].minTime\n",
" filtered = []\n",
" for s_ in s:\n",
" start = s_.minTime - start_at\n",
" end = s_.maxTime - start_at\n",
" text = s_.mark\n",
" if len(text) < 2:\n",
" continue\n",
"\n",
" start = min(timestamps, key=lambda t: abs(t - start))\n",
" end = min(timestamps, key=lambda t: abs(t - end))\n",
"\n",
" filtered.append(f\"<|{start:.2f}|> {text}<|{end:.2f}|>\")\n",
"\n",
" y_ = y[int(s[0].minTime * sr): int(s[-1].maxTime * sr)]\n",
" new_f = f.replace('TextGrid/', 'audio/').replace('.TextGrid', '') + f'-{no}.mp3'\n",
" sf.write(new_f, y_, sr)\n",
" filtered = ''.join(filtered)\n",
" text = f\"<|startoftranscript|><|ms|><|transcribe|>{filtered}<|endoftext|>\"\n",
" \n",
" alls.append({\n",
" 'audio_filename': new_f,\n",
" 'text': text\n",
" })"
]
},
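{
"cell_type": "markdown",
"id": "3c1d9a2e",
"metadata": {},
"source": [
"A quick sanity check on the chunks built above (added as a sketch, not part of the original pipeline): every exported clip should fit Whisper's 30 second window, and every timestamp written into a transcript should stay on the 0.00-30.00 grid."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b7f2c4d1",
"metadata": {},
"outputs": [],
"source": [
"# sketch: verify the chunks respect Whisper's 30 second window and timestamp grid\n",
"ts_pattern = re.compile(r'<\\|(\\d+\\.\\d{2})\\|>')\n",
"for row in alls:\n",
"    # allow a little slack for mp3 frame padding added by the encoder\n",
"    assert sf.info(row['audio_filename']).duration <= 30.1, row['audio_filename']\n",
"    for ts in ts_pattern.findall(row['text']):\n",
"        assert 0.0 <= float(ts) <= 30.0, row['text']\n",
"len(alls)"
]
},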
{
"cell_type": "code",
"execution_count": 129,
"id": "7e88af44",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['<|startoftranscript|>',\n",
" '<|ms|>',\n",
" '<|transcribe|>',\n",
" '<|0.46|>',\n",
" 'ĠAss',\n",
" 'al',\n",
" 'amm',\n",
" 'ual',\n",
" 'a',\n",
" 'ikum',\n",
" ',',\n",
" 'Ġpak',\n",
" 'Ġc',\n",
" 'ik',\n",
" '<|2.24|>',\n",
" '<|2.24|>',\n",
" 'ĠWa',\n",
" 'ala',\n",
" 'ikum',\n",
" 'm',\n",
" 'uss',\n",
" 'alam',\n",
" 'Ġw',\n",
" 'bt',\n",
" '<|5.44|>',\n",
" '<|5.76|>',\n",
" 'ĠBa',\n",
" 'ik',\n",
" 'lah',\n",
" ',',\n",
" 'Ġso',\n",
" 'alan',\n",
" 'Ġpertama',\n",
" 'Ġyang',\n",
" 'Ġing',\n",
" 'in',\n",
" 'Ġsaya',\n",
" 'Ġt',\n",
" 'anya',\n",
" 'Ġap',\n",
" 'akah',\n",
" 'Ġsatu',\n",
" 'Ġcer',\n",
" 'ita',\n",
" 'Ġtrad',\n",
" 'is',\n",
" 'ional',\n",
" 'Ġyang',\n",
" 'Ġanda',\n",
" 'Ġing',\n",
" 'ati',\n",
" '?',\n",
" '<|11.98|>',\n",
" '<|11.98|>',\n",
" 'ĠM',\n",
" 'akan',\n",
" 'Ġsec',\n",
" 'ara',\n",
" 'Ġseper',\n",
" 'ah',\n",
" '<|13.34|>',\n",
" '<|endoftext|>']"
]
},
"execution_count": 129,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tokenizer.tokenize(alls[4]['text'])"
]
},
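{
"cell_type": "markdown",
"id": "9d8e7f6a",
"metadata": {},
"source": [
"One more check before training, sketched here under the assumption that the Whisper decoder accepts at most 448 target positions: flag any transcript that would be truncated once tokenized."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0a1b2c3d",
"metadata": {},
"outputs": [],
"source": [
"# sketch: flag transcripts longer than Whisper's 448 decoder positions,\n",
"# since those would be truncated during fine-tuning\n",
"MAX_TARGET_POSITIONS = 448\n",
"too_long = [\n",
"    row['audio_filename']\n",
"    for row in alls\n",
"    if len(tokenizer(row['text'], add_special_tokens=False)['input_ids']) > MAX_TARGET_POSITIONS\n",
"]\n",
"len(too_long)"
]
},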
{
"cell_type": "code",
"execution_count": 130,
"id": "f3cec209",
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"\n",
" \n",
" "
],
"text/plain": [
""
]
},
"execution_count": 130,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import IPython.display as ipd\n",
"ipd.Audio(alls[4]['audio_filename'])"
]
},
{
"cell_type": "code",
"execution_count": 132,
"id": "a5ab573c",
"metadata": {},
"outputs": [],
"source": [
"# !zip -r sarawakmalay.zip audio"
]
},
{
"cell_type": "code",
"execution_count": 134,
"id": "87d39947",
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"\n",
"with open('dataset.json', 'w') as fopen:\n",
" json.dump(alls, fopen)"
]
},
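{
"cell_type": "markdown",
"id": "5e6f7a8b",
"metadata": {},
"source": [
"The manifest can be loaded back for training, for example with the `datasets` library (an optional sketch; it assumes `datasets` is installed and decodes audio at 16 kHz, the sampling rate Whisper feature extractors expect)."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c9d0e1f2",
"metadata": {},
"outputs": [],
"source": [
"# optional sketch: reload the manifest with the `datasets` library and decode\n",
"# the clips at 16 kHz, the sampling rate Whisper feature extractors expect\n",
"from datasets import Dataset, Audio\n",
"\n",
"ds = Dataset.from_json('dataset.json')\n",
"ds = ds.cast_column('audio_filename', Audio(sampling_rate=16000))\n",
"ds"
]
},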
{
"cell_type": "code",
"execution_count": null,
"id": "4bc46f74",
"metadata": {},
"outputs": [],
"source": [
"from huggingface_hub import HfApi\n",
"api = HfApi()\n",
"api.upload_file(\n",
" path_or_fileobj=\"dataset.json\",\n",
" path_in_repo=\"README.md\",\n",
" repo_id=\"username/test-dataset\",\n",
" repo_type=\"dataset\",\n",
")"
]
}
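,
{
"cell_type": "markdown",
"id": "2b3c4d5e",
"metadata": {},
"source": [
"The clips themselves still only exist in the local `audio/` folder; they can be pushed to the same dataset repo with `upload_folder` (a sketch that reuses the placeholder repo id from the cell above)."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7f8a9b0c",
"metadata": {},
"outputs": [],
"source": [
"# sketch: push the exported clips next to the manifest, reusing the placeholder repo id above\n",
"api.upload_folder(\n",
"    folder_path='audio',\n",
"    path_in_repo='audio',\n",
"    repo_id='username/test-dataset',\n",
"    repo_type='dataset',\n",
")"
]
}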
],
"metadata": {
"kernelspec": {
"display_name": "python3.10",
"language": "python",
"name": "python3.10"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.14"
}
},
"nbformat": 4,
"nbformat_minor": 5
}