Upload 9 files
Browse files- EDA.ipynb +0 -0
- actual_transcribed.csv +0 -0
- test_data.csv +0 -0
- train_data.csv +0 -0
- transcribed_and_reviewed.csv +0 -0
- validation_data.csv +0 -0
- vocab.json +1 -0
- xls-r-yogera.ipynb +1067 -0
- xlsr-train.py +354 -0
EDA.ipynb
ADDED
The diff for this file is too large to render.
See raw diff
|
|
actual_transcribed.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
test_data.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
train_data.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
transcribed_and_reviewed.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
validation_data.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
vocab.json
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
{"'": 1, "1": 2, "6": 3, "9": 4, "a": 5, "b": 6, "c": 7, "d": 8, "e": 9, "f": 10, "g": 11, "h": 12, "i": 13, "j": 14, "k": 15, "l": 16, "m": 17, "n": 18, "o": 19, "p": 20, "q": 21, "r": 22, "s": 23, "t": 24, "u": 25, "v": 26, "w": 27, "y": 28, "z": 29, "\u00e0": 30, "\u014b": 31, "|": 0, "[UNK]": 32, "[PAD]": 33}
|
xls-r-yogera.ipynb
ADDED
@@ -0,0 +1,1067 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"cell_type": "code",
|
5 |
+
"execution_count": null,
|
6 |
+
"id": "c12f1025-2445-43f0-9c76-c8f4bff2502a",
|
7 |
+
"metadata": {},
|
8 |
+
"outputs": [],
|
9 |
+
"source": [
|
10 |
+
"%%capture\n",
|
11 |
+
"!pip install datasets\n",
|
12 |
+
"!pip install --upgrade transformers\n",
|
13 |
+
"!pip install torchaudio\n",
|
14 |
+
"!pip install huggingface_hub\n",
|
15 |
+
"!pip install jiwer evaluate wandb\n",
|
16 |
+
"!pip install librosa\n",
|
17 |
+
"!pip install accelerate -U"
|
18 |
+
]
|
19 |
+
},
|
20 |
+
{
|
21 |
+
"cell_type": "code",
|
22 |
+
"execution_count": null,
|
23 |
+
"id": "3d55e844-cb32-4b6b-a073-5a0f5c393242",
|
24 |
+
"metadata": {
|
25 |
+
"scrolled": true
|
26 |
+
},
|
27 |
+
"outputs": [],
|
28 |
+
"source": [
|
29 |
+
"!pip install transformers[torch]\n",
|
30 |
+
"!pip install accelerate==0.26.0"
|
31 |
+
]
|
32 |
+
},
|
33 |
+
{
|
34 |
+
"cell_type": "code",
|
35 |
+
"execution_count": 1,
|
36 |
+
"id": "0a8323f5-3d10-4ced-8cee-39497ad1ab16",
|
37 |
+
"metadata": {},
|
38 |
+
"outputs": [],
|
39 |
+
"source": [
|
40 |
+
"import os\n",
|
41 |
+
"import re\n",
|
42 |
+
"import unicodedata\n",
|
43 |
+
"import json\n",
|
44 |
+
"import numpy as np\n",
|
45 |
+
"import pandas as pd\n",
|
46 |
+
"import wandb\n",
|
47 |
+
"import librosa\n",
|
48 |
+
"\n",
|
49 |
+
"from datasets import load_dataset, DatasetDict, Audio\n",
|
50 |
+
"from transformers import (\n",
|
51 |
+
" Wav2Vec2CTCTokenizer,\n",
|
52 |
+
" Wav2Vec2FeatureExtractor,\n",
|
53 |
+
" Wav2Vec2Processor,\n",
|
54 |
+
" Wav2Vec2ForCTC,\n",
|
55 |
+
" TrainingArguments,\n",
|
56 |
+
" Trainer,\n",
|
57 |
+
" EarlyStoppingCallback,\n",
|
58 |
+
")\n",
|
59 |
+
"import evaluate\n",
|
60 |
+
"import torch\n",
|
61 |
+
"from dataclasses import dataclass\n",
|
62 |
+
"from typing import Dict, List, Union\n"
|
63 |
+
]
|
64 |
+
},
|
65 |
+
{
|
66 |
+
"cell_type": "code",
|
67 |
+
"execution_count": 2,
|
68 |
+
"id": "c5410b60-2f77-4d9e-a5c8-df57c6abd31b",
|
69 |
+
"metadata": {},
|
70 |
+
"outputs": [
|
71 |
+
{
|
72 |
+
"name": "stdout",
|
73 |
+
"output_type": "stream",
|
74 |
+
"text": [
|
75 |
+
"Cpu count: 96\n",
|
76 |
+
"Num proc: 48\n",
|
77 |
+
"Num dataloaders: 48\n"
|
78 |
+
]
|
79 |
+
}
|
80 |
+
],
|
81 |
+
"source": [
|
82 |
+
"num_proc = os.cpu_count()//2\n",
|
83 |
+
"num_dataloaders = os.cpu_count()//2\n",
|
84 |
+
"\n",
|
85 |
+
"print(f\"Cpu count: {os.cpu_count()}\\nNum proc: {num_proc}\\nNum dataloaders: {num_dataloaders}\")"
|
86 |
+
]
|
87 |
+
},
|
88 |
+
{
|
89 |
+
"cell_type": "code",
|
90 |
+
"execution_count": null,
|
91 |
+
"id": "b3770500-8e3f-4e75-9cf0-31af2e942cdb",
|
92 |
+
"metadata": {},
|
93 |
+
"outputs": [],
|
94 |
+
"source": [
|
95 |
+
"from huggingface_hub import notebook_login\n",
|
96 |
+
"\n",
|
97 |
+
"notebook_login()"
|
98 |
+
]
|
99 |
+
},
|
100 |
+
{
|
101 |
+
"cell_type": "code",
|
102 |
+
"execution_count": 3,
|
103 |
+
"id": "ccdeb15e-b579-437e-ae43-17941888a04e",
|
104 |
+
"metadata": {},
|
105 |
+
"outputs": [],
|
106 |
+
"source": [
|
107 |
+
"from datasets import load_dataset, Audio\n",
|
108 |
+
"\n",
|
109 |
+
"data_files = {\n",
|
110 |
+
" 'train': 'train_data.csv',\n",
|
111 |
+
" 'validation': 'validation_data.csv',\n",
|
112 |
+
" 'test': 'test_data.csv'\n",
|
113 |
+
"}\n",
|
114 |
+
"\n",
|
115 |
+
"dataset = load_dataset('csv', data_files=data_files)\n"
|
116 |
+
]
|
117 |
+
},
|
118 |
+
{
|
119 |
+
"cell_type": "code",
|
120 |
+
"execution_count": 4,
|
121 |
+
"id": "ec4be5eb-bac6-470d-b223-ff48fa6afa2f",
|
122 |
+
"metadata": {},
|
123 |
+
"outputs": [],
|
124 |
+
"source": [
|
125 |
+
"columns_to_keep = ['audio_path', 'sentence', 'audio_len', 'transcript_len']\n",
|
126 |
+
"dataset = dataset.map(lambda batch: {k: batch[k] for k in columns_to_keep}, remove_columns=dataset['train'].column_names)\n"
|
127 |
+
]
|
128 |
+
},
|
129 |
+
{
|
130 |
+
"cell_type": "code",
|
131 |
+
"execution_count": 5,
|
132 |
+
"id": "f7d50095-672b-4e34-908b-400d8beed076",
|
133 |
+
"metadata": {},
|
134 |
+
"outputs": [],
|
135 |
+
"source": [
|
136 |
+
"dataset = dataset.rename_column('audio_path', 'audio')\n",
|
137 |
+
"dataset = dataset.cast_column('audio', Audio(sampling_rate=16_000))"
|
138 |
+
]
|
139 |
+
},
|
140 |
+
{
|
141 |
+
"cell_type": "code",
|
142 |
+
"execution_count": 6,
|
143 |
+
"id": "0254701e-37d1-471f-9399-9e44eb5ca2ea",
|
144 |
+
"metadata": {},
|
145 |
+
"outputs": [],
|
146 |
+
"source": [
|
147 |
+
"train_dataset = dataset['train']\n",
|
148 |
+
"eval_dataset = dataset['validation']\n",
|
149 |
+
"test_dataset = dataset['test']\n"
|
150 |
+
]
|
151 |
+
},
|
152 |
+
{
|
153 |
+
"cell_type": "code",
|
154 |
+
"execution_count": null,
|
155 |
+
"id": "eef15786-00b0-4963-bd9b-ee668b62702c",
|
156 |
+
"metadata": {},
|
157 |
+
"outputs": [],
|
158 |
+
"source": []
|
159 |
+
},
|
160 |
+
{
|
161 |
+
"cell_type": "code",
|
162 |
+
"execution_count": 7,
|
163 |
+
"id": "efa4f50a-98de-4a64-a284-a17a6222737d",
|
164 |
+
"metadata": {},
|
165 |
+
"outputs": [],
|
166 |
+
"source": [
|
167 |
+
"def preprocess_text(batch):\n",
|
168 |
+
" # Convert to lowercase\n",
|
169 |
+
" batch['sentence'] = batch['sentence'].lower()\n",
|
170 |
+
" \n",
|
171 |
+
" # Normalize text\n",
|
172 |
+
" batch['sentence'] = unicodedata.normalize('NFKC', batch['sentence'])\n",
|
173 |
+
" batch['sentence'] = re.sub(r\"[\\’\\ʻ\\ʼ\\ʽ\\‘]\", \"'\", batch['sentence'])\n",
|
174 |
+
" \n",
|
175 |
+
" # Remove punctuation and special characters (except apostrophes)\n",
|
176 |
+
" batch['sentence'] = re.sub(r\"[^\\w\\s']\", '', batch['sentence'])\n",
|
177 |
+
" batch['sentence'] = re.sub(r'_', ' ', batch['sentence'])\n",
|
178 |
+
" \n",
|
179 |
+
" # Remove excessive whitespace\n",
|
180 |
+
" batch['sentence'] = ' '.join(batch['sentence'].split())\n",
|
181 |
+
" \n",
|
182 |
+
" return batch\n",
|
183 |
+
"\n",
|
184 |
+
"\n",
|
185 |
+
"train_dataset = train_dataset.map(preprocess_text)\n",
|
186 |
+
"eval_dataset = eval_dataset.map(preprocess_text)\n",
|
187 |
+
"test_dataset = test_dataset.map(preprocess_text)"
|
188 |
+
]
|
189 |
+
},
|
190 |
+
{
|
191 |
+
"cell_type": "code",
|
192 |
+
"execution_count": 8,
|
193 |
+
"id": "34d9a64b-0ba6-44df-a448-341fe2569311",
|
194 |
+
"metadata": {},
|
195 |
+
"outputs": [],
|
196 |
+
"source": [
|
197 |
+
"def get_len(batch):\n",
|
198 |
+
" # Compute audio length if not already computed\n",
|
199 |
+
" if batch.get('audio_len') is None or batch['audio_len'] == 0.0:\n",
|
200 |
+
" audio = batch['audio']\n",
|
201 |
+
" audio_len = librosa.get_duration(y=audio['array'], sr=audio['sampling_rate'])\n",
|
202 |
+
" batch['audio_len'] = audio_len\n",
|
203 |
+
" \n",
|
204 |
+
" # Compute transcript length if not already computed\n",
|
205 |
+
" if batch.get('transcript_len') is None or batch['transcript_len'] == 0:\n",
|
206 |
+
" batch['transcript_len'] = len(batch['sentence'])\n",
|
207 |
+
" \n",
|
208 |
+
" # Compute length ratio\n",
|
209 |
+
" batch['len_ratio'] = float(batch['audio_len']) / float(batch['transcript_len']) if batch['transcript_len'] > 0 else 0.0\n",
|
210 |
+
" \n",
|
211 |
+
" # Number of feature vectors (assuming 20ms frame shift)\n",
|
212 |
+
" batch['num_feature_vecs'] = int(np.round(batch['audio_len'] * 1000 / 20))\n",
|
213 |
+
" \n",
|
214 |
+
" return batch\n",
|
215 |
+
"\n",
|
216 |
+
"\n",
|
217 |
+
"train_dataset = train_dataset.map(get_len)\n",
|
218 |
+
"eval_dataset = eval_dataset.map(get_len)\n",
|
219 |
+
"test_dataset = test_dataset.map(get_len)\n"
|
220 |
+
]
|
221 |
+
},
|
222 |
+
{
|
223 |
+
"cell_type": "code",
|
224 |
+
"execution_count": 9,
|
225 |
+
"id": "2a69b4ee-a441-4696-aded-5f5c71db8b22",
|
226 |
+
"metadata": {},
|
227 |
+
"outputs": [
|
228 |
+
{
|
229 |
+
"name": "stdout",
|
230 |
+
"output_type": "stream",
|
231 |
+
"text": [
|
232 |
+
"Train dataset size: 3151 samples\n",
|
233 |
+
"Validation dataset size: 647 samples\n",
|
234 |
+
"Test dataset size: 648 samples\n",
|
235 |
+
"Total training audio duration: 20.00 hours\n",
|
236 |
+
"Total validation audio duration: 4.10 hours\n",
|
237 |
+
"Total test audio duration: 4.06 hours\n"
|
238 |
+
]
|
239 |
+
}
|
240 |
+
],
|
241 |
+
"source": [
|
242 |
+
"print(f\"Train dataset size: {len(train_dataset)} samples\")\n",
|
243 |
+
"print(f\"Validation dataset size: {len(eval_dataset)} samples\")\n",
|
244 |
+
"print(f\"Test dataset size: {len(test_dataset)} samples\")\n",
|
245 |
+
"\n",
|
246 |
+
"print(f\"Total training audio duration: {sum(train_dataset['audio_len']) / 3600:.2f} hours\")\n",
|
247 |
+
"print(f\"Total validation audio duration: {sum(eval_dataset['audio_len']) / 3600:.2f} hours\")\n",
|
248 |
+
"print(f\"Total test audio duration: {sum(test_dataset['audio_len']) / 3600:.2f} hours\")\n"
|
249 |
+
]
|
250 |
+
},
|
251 |
+
{
|
252 |
+
"cell_type": "code",
|
253 |
+
"execution_count": 10,
|
254 |
+
"id": "cbc97db6-4e44-4aea-8e8d-8575785cdf84",
|
255 |
+
"metadata": {},
|
256 |
+
"outputs": [
|
257 |
+
{
|
258 |
+
"name": "stdout",
|
259 |
+
"output_type": "stream",
|
260 |
+
"text": [
|
261 |
+
"Alphabet: '169abcdefghijklmnopqrstuvwyzàŋ\n"
|
262 |
+
]
|
263 |
+
}
|
264 |
+
],
|
265 |
+
"source": [
|
266 |
+
"def extract_all_chars(batch):\n",
|
267 |
+
" all_text = ' '.join(batch['sentence'])\n",
|
268 |
+
" vocab = list(set(all_text))\n",
|
269 |
+
" return {'vocab': [vocab]}\n",
|
270 |
+
"\n",
|
271 |
+
"vocab_list = train_dataset.map(\n",
|
272 |
+
" extract_all_chars,\n",
|
273 |
+
" batched=True,\n",
|
274 |
+
" batch_size=-1,\n",
|
275 |
+
" remove_columns=train_dataset.column_names\n",
|
276 |
+
")\n",
|
277 |
+
"\n",
|
278 |
+
"vocab_set = set()\n",
|
279 |
+
"for vocab in vocab_list['vocab']:\n",
|
280 |
+
" vocab_set.update(vocab)\n",
|
281 |
+
"\n",
|
282 |
+
"# Ensure space character is included\n",
|
283 |
+
"if ' ' not in vocab_set:\n",
|
284 |
+
" vocab_set.add(' ')\n",
|
285 |
+
"\n",
|
286 |
+
"alphabet = ''.join(sorted(vocab_set))\n",
|
287 |
+
"print(f\"Alphabet: {alphabet}\")\n"
|
288 |
+
]
|
289 |
+
},
|
290 |
+
{
|
291 |
+
"cell_type": "code",
|
292 |
+
"execution_count": 11,
|
293 |
+
"id": "ac715ca5-b67c-48fd-910c-0cde68efe5b3",
|
294 |
+
"metadata": {},
|
295 |
+
"outputs": [],
|
296 |
+
"source": [
|
297 |
+
"vocab_dict = {char: idx for idx, char in enumerate(sorted(vocab_set))}\n",
|
298 |
+
"\n",
|
299 |
+
"# Replace space with word delimiter token '|'\n",
|
300 |
+
"vocab_dict['|'] = vocab_dict[' ']\n",
|
301 |
+
"del vocab_dict[' ']\n",
|
302 |
+
"\n",
|
303 |
+
"# Add special tokens\n",
|
304 |
+
"vocab_dict['[UNK]'] = len(vocab_dict)\n",
|
305 |
+
"vocab_dict['[PAD]'] = len(vocab_dict)\n",
|
306 |
+
"\n",
|
307 |
+
"# Save the vocabulary dictionary to a JSON file\n",
|
308 |
+
"with open('vocab.json', 'w') as vocab_file:\n",
|
309 |
+
" json.dump(vocab_dict, vocab_file)\n"
|
310 |
+
]
|
311 |
+
},
|
312 |
+
{
|
313 |
+
"cell_type": "code",
|
314 |
+
"execution_count": 12,
|
315 |
+
"id": "cc3d9d11-9e2d-4d5e-838a-f4ff0da2f6e9",
|
316 |
+
"metadata": {},
|
317 |
+
"outputs": [
|
318 |
+
{
|
319 |
+
"name": "stderr",
|
320 |
+
"output_type": "stream",
|
321 |
+
"text": [
|
322 |
+
"/usr/local/lib/python3.10/dist-packages/transformers/tokenization_utils_base.py:1617: FutureWarning: `clean_up_tokenization_spaces` was not set. It will be set to `True` by default. This behavior will be deprecated in transformers v4.45, and will be then set to `False` by default. For more details check this issue: https://github.com/huggingface/transformers/issues/31884\n",
|
323 |
+
" warnings.warn(\n"
|
324 |
+
]
|
325 |
+
}
|
326 |
+
],
|
327 |
+
"source": [
|
328 |
+
"tokenizer = Wav2Vec2CTCTokenizer(\n",
|
329 |
+
" 'vocab.json',\n",
|
330 |
+
" unk_token='[UNK]',\n",
|
331 |
+
" pad_token='[PAD]',\n",
|
332 |
+
" word_delimiter_token='|'\n",
|
333 |
+
")\n",
|
334 |
+
"\n",
|
335 |
+
"feature_extractor = Wav2Vec2FeatureExtractor(\n",
|
336 |
+
" feature_size=1,\n",
|
337 |
+
" sampling_rate=16_000,\n",
|
338 |
+
" padding_value=0.0,\n",
|
339 |
+
" do_normalize=True,\n",
|
340 |
+
" return_attention_mask=True\n",
|
341 |
+
")\n",
|
342 |
+
"\n",
|
343 |
+
"processor = Wav2Vec2Processor(\n",
|
344 |
+
" feature_extractor=feature_extractor,\n",
|
345 |
+
" tokenizer=tokenizer\n",
|
346 |
+
")\n"
|
347 |
+
]
|
348 |
+
},
|
349 |
+
{
|
350 |
+
"cell_type": "code",
|
351 |
+
"execution_count": 13,
|
352 |
+
"id": "4968b962-9074-48e8-a073-99cea7a914f9",
|
353 |
+
"metadata": {},
|
354 |
+
"outputs": [],
|
355 |
+
"source": [
|
356 |
+
"def prepare_dataset(batch):\n",
|
357 |
+
" # Process audio\n",
|
358 |
+
" audio = batch['audio']\n",
|
359 |
+
" batch['input_values'] = processor(\n",
|
360 |
+
" audio['array'],\n",
|
361 |
+
" sampling_rate=audio['sampling_rate']\n",
|
362 |
+
" ).input_values[0]\n",
|
363 |
+
" \n",
|
364 |
+
" # Process transcript\n",
|
365 |
+
" batch['labels'] = processor(\n",
|
366 |
+
" text=batch['sentence']\n",
|
367 |
+
" ).input_ids\n",
|
368 |
+
" return batch\n"
|
369 |
+
]
|
370 |
+
},
|
371 |
+
{
|
372 |
+
"cell_type": "code",
|
373 |
+
"execution_count": 14,
|
374 |
+
"id": "1247f52a-29d3-486b-9636-5065cade7a9a",
|
375 |
+
"metadata": {},
|
376 |
+
"outputs": [],
|
377 |
+
"source": [
|
378 |
+
"train_dataset = train_dataset.map(prepare_dataset, remove_columns=['audio', 'sentence'])\n",
|
379 |
+
"eval_dataset = eval_dataset.map(prepare_dataset, remove_columns=['audio', 'sentence'])\n",
|
380 |
+
"test_dataset = test_dataset.map(prepare_dataset, remove_columns=['audio', 'sentence'])\n"
|
381 |
+
]
|
382 |
+
},
|
383 |
+
{
|
384 |
+
"cell_type": "code",
|
385 |
+
"execution_count": 15,
|
386 |
+
"id": "298543b1-3795-4c03-8a4e-4802d0552f03",
|
387 |
+
"metadata": {},
|
388 |
+
"outputs": [],
|
389 |
+
"source": [
|
390 |
+
"@dataclass\n",
|
391 |
+
"class DataCollatorCTCWithPadding:\n",
|
392 |
+
" processor: Wav2Vec2Processor\n",
|
393 |
+
" padding: Union[bool, str] = True\n",
|
394 |
+
"\n",
|
395 |
+
" def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:\n",
|
396 |
+
" # Split inputs and labels\n",
|
397 |
+
" input_features = [{'input_values': feature['input_values']} for feature in features]\n",
|
398 |
+
" label_features = [{'input_ids': feature['labels']} for feature in features]\n",
|
399 |
+
"\n",
|
400 |
+
" # Pad inputs\n",
|
401 |
+
" batch = self.processor.pad(\n",
|
402 |
+
" input_features,\n",
|
403 |
+
" padding=self.padding,\n",
|
404 |
+
" return_tensors='pt'\n",
|
405 |
+
" )\n",
|
406 |
+
"\n",
|
407 |
+
" # Pad labels\n",
|
408 |
+
" labels_batch = self.processor.pad(\n",
|
409 |
+
" labels=label_features,\n",
|
410 |
+
" padding=self.padding,\n",
|
411 |
+
" return_tensors='pt'\n",
|
412 |
+
" )\n",
|
413 |
+
"\n",
|
414 |
+
" # Replace padding with -100 to ignore loss calculation for padding\n",
|
415 |
+
" labels = labels_batch['input_ids'].masked_fill(labels_batch.attention_mask.ne(1), -100)\n",
|
416 |
+
"\n",
|
417 |
+
" batch['labels'] = labels\n",
|
418 |
+
"\n",
|
419 |
+
" return batch\n",
|
420 |
+
"\n",
|
421 |
+
"data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)\n"
|
422 |
+
]
|
423 |
+
},
|
424 |
+
{
|
425 |
+
"cell_type": "code",
|
426 |
+
"execution_count": 16,
|
427 |
+
"id": "711c51ad-266b-421e-80aa-0c631a72d4e2",
|
428 |
+
"metadata": {},
|
429 |
+
"outputs": [],
|
430 |
+
"source": [
|
431 |
+
"wer_metric = evaluate.load('wer')\n",
|
432 |
+
"cer_metric = evaluate.load('cer')\n"
|
433 |
+
]
|
434 |
+
},
|
435 |
+
{
|
436 |
+
"cell_type": "code",
|
437 |
+
"execution_count": 17,
|
438 |
+
"id": "120b8c11-16e4-4cce-9dd3-fd31684c6959",
|
439 |
+
"metadata": {},
|
440 |
+
"outputs": [],
|
441 |
+
"source": [
|
442 |
+
"def compute_metrics(pred):\n",
|
443 |
+
" pred_logits = pred.predictions\n",
|
444 |
+
" pred_ids = np.argmax(pred_logits, axis=-1)\n",
|
445 |
+
"\n",
|
446 |
+
" # Replace -100 in labels as we can't decode them\n",
|
447 |
+
" label_ids = pred.label_ids\n",
|
448 |
+
" label_ids[label_ids == -100] = processor.tokenizer.pad_token_id\n",
|
449 |
+
"\n",
|
450 |
+
" # Decode predictions and references\n",
|
451 |
+
" pred_str = processor.batch_decode(pred_ids)\n",
|
452 |
+
" label_str = processor.batch_decode(label_ids, group_tokens=False)\n",
|
453 |
+
"\n",
|
454 |
+
" wer = wer_metric.compute(predictions=pred_str, references=label_str)\n",
|
455 |
+
" cer = cer_metric.compute(predictions=pred_str, references=label_str)\n",
|
456 |
+
"\n",
|
457 |
+
" return {'wer': wer, 'cer': cer}\n"
|
458 |
+
]
|
459 |
+
},
|
460 |
+
{
|
461 |
+
"cell_type": "code",
|
462 |
+
"execution_count": 18,
|
463 |
+
"id": "a1fa4aed-40f8-451d-aea2-9dd200b2d215",
|
464 |
+
"metadata": {},
|
465 |
+
"outputs": [
|
466 |
+
{
|
467 |
+
"name": "stderr",
|
468 |
+
"output_type": "stream",
|
469 |
+
"text": [
|
470 |
+
"/usr/local/lib/python3.10/dist-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()\n",
|
471 |
+
" return self.fget.__get__(instance, owner)()\n",
|
472 |
+
"Some weights of Wav2Vec2ForCTC were not initialized from the model checkpoint at facebook/wav2vec2-xls-r-300m and are newly initialized: ['lm_head.bias', 'lm_head.weight']\n",
|
473 |
+
"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
|
474 |
+
]
|
475 |
+
}
|
476 |
+
],
|
477 |
+
"source": [
|
478 |
+
"model_checkpoint = \"facebook/wav2vec2-xls-r-300m\" # You can use a different checkpoint if desired\n",
|
479 |
+
"\n",
|
480 |
+
"model = Wav2Vec2ForCTC.from_pretrained(\n",
|
481 |
+
" model_checkpoint,\n",
|
482 |
+
" attention_dropout=0.1,\n",
|
483 |
+
" hidden_dropout=0.1,\n",
|
484 |
+
" feat_proj_dropout=0.0,\n",
|
485 |
+
" mask_time_prob=0.05,\n",
|
486 |
+
" layerdrop=0.1,\n",
|
487 |
+
" ctc_loss_reduction='mean',\n",
|
488 |
+
" pad_token_id=processor.tokenizer.pad_token_id,\n",
|
489 |
+
" vocab_size=len(processor.tokenizer),\n",
|
490 |
+
")\n"
|
491 |
+
]
|
492 |
+
},
|
493 |
+
{
|
494 |
+
"cell_type": "code",
|
495 |
+
"execution_count": 19,
|
496 |
+
"id": "dc4e2945-ad0d-486b-a8a3-bb712ad9a63c",
|
497 |
+
"metadata": {},
|
498 |
+
"outputs": [],
|
499 |
+
"source": [
|
500 |
+
"model.freeze_feature_encoder()\n"
|
501 |
+
]
|
502 |
+
},
|
503 |
+
{
|
504 |
+
"cell_type": "code",
|
505 |
+
"execution_count": 20,
|
506 |
+
"id": "42fecc4a-9eaa-49a8-8b2a-1087dc50d04c",
|
507 |
+
"metadata": {},
|
508 |
+
"outputs": [
|
509 |
+
{
|
510 |
+
"data": {
|
511 |
+
"text/plain": [
|
512 |
+
"20"
|
513 |
+
]
|
514 |
+
},
|
515 |
+
"execution_count": 20,
|
516 |
+
"metadata": {},
|
517 |
+
"output_type": "execute_result"
|
518 |
+
}
|
519 |
+
],
|
520 |
+
"source": [
|
521 |
+
"round(sum(train_dataset['audio_len']) / 3600)"
|
522 |
+
]
|
523 |
+
},
|
524 |
+
{
|
525 |
+
"cell_type": "code",
|
526 |
+
"execution_count": 21,
|
527 |
+
"id": "d26ab602-0247-4e5c-8362-e01ea7de41b0",
|
528 |
+
"metadata": {},
|
529 |
+
"outputs": [
|
530 |
+
{
|
531 |
+
"name": "stderr",
|
532 |
+
"output_type": "stream",
|
533 |
+
"text": [
|
534 |
+
"\u001b[34m\u001b[1mwandb\u001b[0m: Using wandb-core as the SDK backend. Please refer to https://wandb.me/wandb-core for more information.\n",
|
535 |
+
"\u001b[34m\u001b[1mwandb\u001b[0m: Currently logged in as: \u001b[33msulaiman-kagumire\u001b[0m (\u001b[33masr-africa-research-team\u001b[0m). Use \u001b[1m`wandb login --relogin`\u001b[0m to force relogin\n"
|
536 |
+
]
|
537 |
+
},
|
538 |
+
{
|
539 |
+
"data": {
|
540 |
+
"text/html": [
|
541 |
+
"Tracking run with wandb version 0.18.3"
|
542 |
+
],
|
543 |
+
"text/plain": [
|
544 |
+
"<IPython.core.display.HTML object>"
|
545 |
+
]
|
546 |
+
},
|
547 |
+
"metadata": {},
|
548 |
+
"output_type": "display_data"
|
549 |
+
},
|
550 |
+
{
|
551 |
+
"data": {
|
552 |
+
"text/html": [
|
553 |
+
"Run data is saved locally in <code>/workspace/wandb/run-20241006_174814-89bkm5a5</code>"
|
554 |
+
],
|
555 |
+
"text/plain": [
|
556 |
+
"<IPython.core.display.HTML object>"
|
557 |
+
]
|
558 |
+
},
|
559 |
+
"metadata": {},
|
560 |
+
"output_type": "display_data"
|
561 |
+
},
|
562 |
+
{
|
563 |
+
"data": {
|
564 |
+
"text/html": [
|
565 |
+
"Syncing run <strong><a href='https://wandb.ai/asr-africa-research-team/ASR%20Africa/runs/89bkm5a5' target=\"_blank\">wav2vec2-xls-r-300m-yogera-lg-20hrs-v1</a></strong> to <a href='https://wandb.ai/asr-africa-research-team/ASR%20Africa' target=\"_blank\">Weights & Biases</a> (<a href='https://wandb.me/run' target=\"_blank\">docs</a>)<br/>"
|
566 |
+
],
|
567 |
+
"text/plain": [
|
568 |
+
"<IPython.core.display.HTML object>"
|
569 |
+
]
|
570 |
+
},
|
571 |
+
"metadata": {},
|
572 |
+
"output_type": "display_data"
|
573 |
+
},
|
574 |
+
{
|
575 |
+
"data": {
|
576 |
+
"text/html": [
|
577 |
+
" View project at <a href='https://wandb.ai/asr-africa-research-team/ASR%20Africa' target=\"_blank\">https://wandb.ai/asr-africa-research-team/ASR%20Africa</a>"
|
578 |
+
],
|
579 |
+
"text/plain": [
|
580 |
+
"<IPython.core.display.HTML object>"
|
581 |
+
]
|
582 |
+
},
|
583 |
+
"metadata": {},
|
584 |
+
"output_type": "display_data"
|
585 |
+
},
|
586 |
+
{
|
587 |
+
"data": {
|
588 |
+
"text/html": [
|
589 |
+
" View run at <a href='https://wandb.ai/asr-africa-research-team/ASR%20Africa/runs/89bkm5a5' target=\"_blank\">https://wandb.ai/asr-africa-research-team/ASR%20Africa/runs/89bkm5a5</a>"
|
590 |
+
],
|
591 |
+
"text/plain": [
|
592 |
+
"<IPython.core.display.HTML object>"
|
593 |
+
]
|
594 |
+
},
|
595 |
+
"metadata": {},
|
596 |
+
"output_type": "display_data"
|
597 |
+
},
|
598 |
+
{
|
599 |
+
"data": {
|
600 |
+
"text/html": [
|
601 |
+
"<button onClick=\"this.nextSibling.style.display='block';this.style.display='none';\">Display W&B run</button><iframe src='https://wandb.ai/asr-africa-research-team/ASR%20Africa/runs/89bkm5a5?jupyter=true' style='border:none;width:100%;height:420px;display:none;'></iframe>"
|
602 |
+
],
|
603 |
+
"text/plain": [
|
604 |
+
"<wandb.sdk.wandb_run.Run at 0x7f5927f97eb0>"
|
605 |
+
]
|
606 |
+
},
|
607 |
+
"execution_count": 21,
|
608 |
+
"metadata": {},
|
609 |
+
"output_type": "execute_result"
|
610 |
+
}
|
611 |
+
],
|
612 |
+
"source": [
|
613 |
+
"# Define your dataset and experiment details\n",
|
614 |
+
"dataset = 'yogera' # Replace with actual dataset name\n",
|
615 |
+
"language = 'lg' # Replace with the language you are working with\n",
|
616 |
+
"sample_hours = round(sum(train_dataset['audio_len']) / 3600) # Calculate total training hours\n",
|
617 |
+
"version = 'v1' # Version of your fine-tuned model\n",
|
618 |
+
"batch_size = 8 # Adjust based on your GPU capacity\n",
|
619 |
+
"grad_acc = 1\n",
|
620 |
+
"eval_batch_size = batch_size // 2\n",
|
621 |
+
"epochs = 100 # Adjust as needed\n",
|
622 |
+
"output_dir = f\"{model_checkpoint.split('/')[-1]}-{dataset}-{language}-{sample_hours}hrs-{version}\"\n",
|
623 |
+
"\n",
|
624 |
+
"# Initialize WandB\n",
|
625 |
+
"wandb.init(\n",
|
626 |
+
" project=\"ASR Africa\",\n",
|
627 |
+
" entity=\"asr-africa-research-team\",\n",
|
628 |
+
" name=output_dir,\n",
|
629 |
+
")\n"
|
630 |
+
]
|
631 |
+
},
|
632 |
+
{
|
633 |
+
"cell_type": "code",
|
634 |
+
"execution_count": null,
|
635 |
+
"id": "28e0779e-056a-46d9-8ff3-ce4411a8f005",
|
636 |
+
"metadata": {},
|
637 |
+
"outputs": [],
|
638 |
+
"source": []
|
639 |
+
},
|
640 |
+
{
|
641 |
+
"cell_type": "code",
|
642 |
+
"execution_count": 31,
|
643 |
+
"id": "33288226-5d7d-4a67-bee1-8fd3bc0b6896",
|
644 |
+
"metadata": {},
|
645 |
+
"outputs": [],
|
646 |
+
"source": [
|
647 |
+
"training_args = TrainingArguments(\n",
|
648 |
+
" output_dir=output_dir,\n",
|
649 |
+
" group_by_length=True,\n",
|
650 |
+
" per_device_train_batch_size=batch_size,\n",
|
651 |
+
" per_device_eval_batch_size=eval_batch_size,\n",
|
652 |
+
" gradient_accumulation_steps=grad_acc,\n",
|
653 |
+
" eval_strategy=\"epoch\",\n",
|
654 |
+
" logging_strategy=\"epoch\",\n",
|
655 |
+
" save_strategy=\"epoch\",\n",
|
656 |
+
" num_train_epochs=epochs,\n",
|
657 |
+
" gradient_checkpointing=True,\n",
|
658 |
+
" fp16=True,\n",
|
659 |
+
" learning_rate=1e-4,\n",
|
660 |
+
" lr_scheduler_type='linear',\n",
|
661 |
+
" warmup_ratio=0.1,\n",
|
662 |
+
" save_total_limit=2,\n",
|
663 |
+
" load_best_model_at_end=True,\n",
|
664 |
+
" metric_for_best_model=\"wer\",\n",
|
665 |
+
" greater_is_better=False,\n",
|
666 |
+
" optim='adamw_torch',\n",
|
667 |
+
" push_to_hub=True,\n",
|
668 |
+
" hub_model_id=f\"asr-africa/{output_dir}\",\n",
|
669 |
+
" hub_private_repo=True,\n",
|
670 |
+
" dataloader_num_workers=num_dataloaders,\n",
|
671 |
+
")"
|
672 |
+
]
|
673 |
+
},
|
674 |
+
{
|
675 |
+
"cell_type": "code",
|
676 |
+
"execution_count": 32,
|
677 |
+
"id": "14ae6f20-47bc-4309-abb2-f5034828143b",
|
678 |
+
"metadata": {},
|
679 |
+
"outputs": [
|
680 |
+
{
|
681 |
+
"name": "stderr",
|
682 |
+
"output_type": "stream",
|
683 |
+
"text": [
|
684 |
+
"Detected kernel version 5.4.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.\n"
|
685 |
+
]
|
686 |
+
}
|
687 |
+
],
|
688 |
+
"source": [
|
689 |
+
"trainer = Trainer(\n",
|
690 |
+
" model=model,\n",
|
691 |
+
" data_collator=data_collator,\n",
|
692 |
+
" args=training_args,\n",
|
693 |
+
" compute_metrics=compute_metrics,\n",
|
694 |
+
" train_dataset=train_dataset,\n",
|
695 |
+
" eval_dataset=eval_dataset,\n",
|
696 |
+
" tokenizer=processor.feature_extractor,\n",
|
697 |
+
" callbacks=[\n",
|
698 |
+
" EarlyStoppingCallback(\n",
|
699 |
+
" early_stopping_patience=10, # Stop if no improvement after 10 evaluation steps\n",
|
700 |
+
" early_stopping_threshold=1e-3 # Stop if improvement is less than 0.001\n",
|
701 |
+
" )\n",
|
702 |
+
" ],\n",
|
703 |
+
")\n"
|
704 |
+
]
|
705 |
+
},
|
706 |
+
{
|
707 |
+
"cell_type": "code",
|
708 |
+
"execution_count": 33,
|
709 |
+
"id": "15b96745-c534-4638-84b3-2b75037bc81c",
|
710 |
+
"metadata": {
|
711 |
+
"scrolled": true
|
712 |
+
},
|
713 |
+
"outputs": [
|
714 |
+
{
|
715 |
+
"name": "stderr",
|
716 |
+
"output_type": "stream",
|
717 |
+
"text": [
|
718 |
+
"\u001b[34m\u001b[1mwandb\u001b[0m: \u001b[33mWARNING\u001b[0m The `run_name` is currently set to the same value as `TrainingArguments.output_dir`. If this was not intended, please specify a different run name by setting the `TrainingArguments.run_name` parameter.\n"
|
719 |
+
]
|
720 |
+
},
|
721 |
+
{
|
722 |
+
"data": {
|
723 |
+
"text/html": [
|
724 |
+
"\n",
|
725 |
+
" <div>\n",
|
726 |
+
" \n",
|
727 |
+
" <progress value='561' max='39400' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
|
728 |
+
" [ 561/39400 15:15 < 17:40:28, 0.61 it/s, Epoch 1.42/100]\n",
|
729 |
+
" </div>\n",
|
730 |
+
" <table border=\"1\" class=\"dataframe\">\n",
|
731 |
+
" <thead>\n",
|
732 |
+
" <tr style=\"text-align: left;\">\n",
|
733 |
+
" <th>Epoch</th>\n",
|
734 |
+
" <th>Training Loss</th>\n",
|
735 |
+
" <th>Validation Loss</th>\n",
|
736 |
+
" <th>Wer</th>\n",
|
737 |
+
" <th>Cer</th>\n",
|
738 |
+
" </tr>\n",
|
739 |
+
" </thead>\n",
|
740 |
+
" <tbody>\n",
|
741 |
+
" <tr>\n",
|
742 |
+
" <td>1</td>\n",
|
743 |
+
" <td>13.121900</td>\n",
|
744 |
+
" <td>4.382024</td>\n",
|
745 |
+
" <td>1.000000</td>\n",
|
746 |
+
" <td>1.000000</td>\n",
|
747 |
+
" </tr>\n",
|
748 |
+
" </tbody>\n",
|
749 |
+
"</table><p>"
|
750 |
+
],
|
751 |
+
"text/plain": [
|
752 |
+
"<IPython.core.display.HTML object>"
|
753 |
+
]
|
754 |
+
},
|
755 |
+
"metadata": {},
|
756 |
+
"output_type": "display_data"
|
757 |
+
},
|
758 |
+
{
|
759 |
+
"ename": "KeyboardInterrupt",
|
760 |
+
"evalue": "",
|
761 |
+
"output_type": "error",
|
762 |
+
"traceback": [
|
763 |
+
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
|
764 |
+
"\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
|
765 |
+
"Cell \u001b[0;32mIn[33], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mtrainer\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtrain\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n",
|
766 |
+
"File \u001b[0;32m/usr/local/lib/python3.10/dist-packages/transformers/trainer.py:2043\u001b[0m, in \u001b[0;36mTrainer.train\u001b[0;34m(self, resume_from_checkpoint, trial, ignore_keys_for_eval, **kwargs)\u001b[0m\n\u001b[1;32m 2040\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 2041\u001b[0m \u001b[38;5;66;03m# Disable progress bars when uploading models during checkpoints to avoid polluting stdout\u001b[39;00m\n\u001b[1;32m 2042\u001b[0m hf_hub_utils\u001b[38;5;241m.\u001b[39mdisable_progress_bars()\n\u001b[0;32m-> 2043\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43minner_training_loop\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2044\u001b[0m \u001b[43m \u001b[49m\u001b[43margs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2045\u001b[0m \u001b[43m \u001b[49m\u001b[43mresume_from_checkpoint\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mresume_from_checkpoint\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2046\u001b[0m \u001b[43m \u001b[49m\u001b[43mtrial\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtrial\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2047\u001b[0m \u001b[43m \u001b[49m\u001b[43mignore_keys_for_eval\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mignore_keys_for_eval\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2048\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 2049\u001b[0m \u001b[38;5;28;01mfinally\u001b[39;00m:\n\u001b[1;32m 2050\u001b[0m hf_hub_utils\u001b[38;5;241m.\u001b[39menable_progress_bars()\n",
|
767 |
+
"File \u001b[0;32m/usr/local/lib/python3.10/dist-packages/transformers/trainer.py:2388\u001b[0m, in \u001b[0;36mTrainer._inner_training_loop\u001b[0;34m(self, batch_size, args, resume_from_checkpoint, trial, ignore_keys_for_eval)\u001b[0m\n\u001b[1;32m 2385\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mcontrol \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mcallback_handler\u001b[38;5;241m.\u001b[39mon_step_begin(args, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstate, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mcontrol)\n\u001b[1;32m 2387\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39maccelerator\u001b[38;5;241m.\u001b[39maccumulate(model):\n\u001b[0;32m-> 2388\u001b[0m tr_loss_step \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtraining_step\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 2390\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m (\n\u001b[1;32m 2391\u001b[0m args\u001b[38;5;241m.\u001b[39mlogging_nan_inf_filter\n\u001b[1;32m 2392\u001b[0m \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m is_torch_xla_available()\n\u001b[1;32m 2393\u001b[0m \u001b[38;5;129;01mand\u001b[39;00m (torch\u001b[38;5;241m.\u001b[39misnan(tr_loss_step) \u001b[38;5;129;01mor\u001b[39;00m torch\u001b[38;5;241m.\u001b[39misinf(tr_loss_step))\n\u001b[1;32m 2394\u001b[0m ):\n\u001b[1;32m 2395\u001b[0m \u001b[38;5;66;03m# if loss is nan or inf simply add the average of previous logged losses\u001b[39;00m\n\u001b[1;32m 2396\u001b[0m tr_loss \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m tr_loss \u001b[38;5;241m/\u001b[39m (\u001b[38;5;241m1\u001b[39m \u001b[38;5;241m+\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstate\u001b[38;5;241m.\u001b[39mglobal_step \u001b[38;5;241m-\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_globalstep_last_logged)\n",
|
768 |
+
"File \u001b[0;32m/usr/local/lib/python3.10/dist-packages/transformers/trainer.py:3518\u001b[0m, in \u001b[0;36mTrainer.training_step\u001b[0;34m(***failed resolving arguments***)\u001b[0m\n\u001b[1;32m 3516\u001b[0m scaled_loss\u001b[38;5;241m.\u001b[39mbackward()\n\u001b[1;32m 3517\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 3518\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43maccelerator\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbackward\u001b[49m\u001b[43m(\u001b[49m\u001b[43mloss\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 3520\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m loss\u001b[38;5;241m.\u001b[39mdetach() \u001b[38;5;241m/\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39margs\u001b[38;5;241m.\u001b[39mgradient_accumulation_steps\n",
|
769 |
+
"File \u001b[0;32m/usr/local/lib/python3.10/dist-packages/accelerate/accelerator.py:2192\u001b[0m, in \u001b[0;36mAccelerator.backward\u001b[0;34m(self, loss, **kwargs)\u001b[0m\n\u001b[1;32m 2190\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m\n\u001b[1;32m 2191\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mscaler \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m-> 2192\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mscaler\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mscale\u001b[49m\u001b[43m(\u001b[49m\u001b[43mloss\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbackward\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 2193\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m learning_rate \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mhas_lomo_optimizer:\n\u001b[1;32m 2194\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mlomo_backward(loss, learning_rate)\n",
|
770 |
+
"File \u001b[0;32m/usr/local/lib/python3.10/dist-packages/torch/_tensor.py:492\u001b[0m, in \u001b[0;36mTensor.backward\u001b[0;34m(self, gradient, retain_graph, create_graph, inputs)\u001b[0m\n\u001b[1;32m 482\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m has_torch_function_unary(\u001b[38;5;28mself\u001b[39m):\n\u001b[1;32m 483\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m handle_torch_function(\n\u001b[1;32m 484\u001b[0m Tensor\u001b[38;5;241m.\u001b[39mbackward,\n\u001b[1;32m 485\u001b[0m (\u001b[38;5;28mself\u001b[39m,),\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 490\u001b[0m inputs\u001b[38;5;241m=\u001b[39minputs,\n\u001b[1;32m 491\u001b[0m )\n\u001b[0;32m--> 492\u001b[0m \u001b[43mtorch\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mautograd\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbackward\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 493\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mgradient\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mretain_graph\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcreate_graph\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43minputs\u001b[49m\n\u001b[1;32m 494\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n",
|
771 |
+
"File \u001b[0;32m/usr/local/lib/python3.10/dist-packages/torch/autograd/__init__.py:251\u001b[0m, in \u001b[0;36mbackward\u001b[0;34m(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)\u001b[0m\n\u001b[1;32m 246\u001b[0m retain_graph \u001b[38;5;241m=\u001b[39m create_graph\n\u001b[1;32m 248\u001b[0m \u001b[38;5;66;03m# The reason we repeat the same comment below is that\u001b[39;00m\n\u001b[1;32m 249\u001b[0m \u001b[38;5;66;03m# some Python versions print out the first line of a multi-line function\u001b[39;00m\n\u001b[1;32m 250\u001b[0m \u001b[38;5;66;03m# calls in the traceback and some print out the last line\u001b[39;00m\n\u001b[0;32m--> 251\u001b[0m \u001b[43mVariable\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_execution_engine\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun_backward\u001b[49m\u001b[43m(\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;66;43;03m# Calls into the C++ engine to run the backward pass\u001b[39;49;00m\n\u001b[1;32m 252\u001b[0m \u001b[43m \u001b[49m\u001b[43mtensors\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 253\u001b[0m \u001b[43m \u001b[49m\u001b[43mgrad_tensors_\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 254\u001b[0m \u001b[43m \u001b[49m\u001b[43mretain_graph\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 255\u001b[0m \u001b[43m \u001b[49m\u001b[43mcreate_graph\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 256\u001b[0m \u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 257\u001b[0m \u001b[43m \u001b[49m\u001b[43mallow_unreachable\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m 258\u001b[0m \u001b[43m \u001b[49m\u001b[43maccumulate_grad\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m 259\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n",
|
772 |
+
"\u001b[0;31mKeyboardInterrupt\u001b[0m: "
|
773 |
+
]
|
774 |
+
}
|
775 |
+
],
|
776 |
+
"source": [
|
777 |
+
"trainer.train()"
|
778 |
+
]
|
779 |
+
},
|
780 |
+
{
|
781 |
+
"cell_type": "code",
|
782 |
+
"execution_count": null,
|
783 |
+
"id": "9fad91de-1d1a-4de9-9049-e8048a179746",
|
784 |
+
"metadata": {},
|
785 |
+
"outputs": [],
|
786 |
+
"source": [
|
787 |
+
"from huggingface_hub import HfApi, HfFolder\n",
|
788 |
+
"\n",
|
789 |
+
"# Your Hugging Face credentials\n",
|
790 |
+
"repo_id = \"sulaimank/yog-transcribed\" # Replace with your actual Hugging Face repository\n",
|
791 |
+
"zip_file = \"transcribed_wavs.zip\"\n",
|
792 |
+
"\n",
|
793 |
+
"# Initialize API and upload\n",
|
794 |
+
"api = HfApi()\n",
|
795 |
+
"api.upload_file(\n",
|
796 |
+
" path_or_fileobj=zip_file,\n",
|
797 |
+
" path_in_repo=zip_file, # Path where the file will be stored in the repo\n",
|
798 |
+
" repo_id=repo_id,\n",
|
799 |
+
" repo_type=\"dataset\", # This is a dataset repository\n",
|
800 |
+
")\n",
|
801 |
+
"\n",
|
802 |
+
"print(f\"Uploaded {zip_file} to Hugging Face at {repo_id}\")\n"
|
803 |
+
]
|
804 |
+
},
|
805 |
+
{
|
806 |
+
"cell_type": "code",
|
807 |
+
"execution_count": null,
|
808 |
+
"id": "e6e82f64-305a-476b-8bc2-563f628fb720",
|
809 |
+
"metadata": {},
|
810 |
+
"outputs": [],
|
811 |
+
"source": []
|
812 |
+
},
|
813 |
+
{
|
814 |
+
"cell_type": "code",
|
815 |
+
"execution_count": null,
|
816 |
+
"id": "483bb56b-f337-4f97-a371-983f699e977d",
|
817 |
+
"metadata": {},
|
818 |
+
"outputs": [],
|
819 |
+
"source": []
|
820 |
+
},
|
821 |
+
{
|
822 |
+
"cell_type": "code",
|
823 |
+
"execution_count": null,
|
824 |
+
"id": "671806dc-daab-49ca-8c61-07a12b61ea5b",
|
825 |
+
"metadata": {},
|
826 |
+
"outputs": [],
|
827 |
+
"source": []
|
828 |
+
},
|
829 |
+
{
|
830 |
+
"cell_type": "code",
|
831 |
+
"execution_count": null,
|
832 |
+
"id": "4a3ec029-6154-44b3-bd61-ff7b63476407",
|
833 |
+
"metadata": {},
|
834 |
+
"outputs": [],
|
835 |
+
"source": []
|
836 |
+
},
|
837 |
+
{
|
838 |
+
"cell_type": "code",
|
839 |
+
"execution_count": null,
|
840 |
+
"id": "19424ca3-aff1-478e-bb3d-67662954ab4f",
|
841 |
+
"metadata": {},
|
842 |
+
"outputs": [],
|
843 |
+
"source": []
|
844 |
+
},
|
845 |
+
{
|
846 |
+
"cell_type": "code",
|
847 |
+
"execution_count": null,
|
848 |
+
"id": "8d099ea1-4e3c-461d-bac0-77007f8a1915",
|
849 |
+
"metadata": {},
|
850 |
+
"outputs": [],
|
851 |
+
"source": []
|
852 |
+
},
|
853 |
+
{
|
854 |
+
"cell_type": "code",
|
855 |
+
"execution_count": null,
|
856 |
+
"id": "b5d56e60-6a1b-4456-99c2-efc43a39b67b",
|
857 |
+
"metadata": {},
|
858 |
+
"outputs": [],
|
859 |
+
"source": []
|
860 |
+
},
|
861 |
+
{
|
862 |
+
"cell_type": "code",
|
863 |
+
"execution_count": null,
|
864 |
+
"id": "c9703c85-d652-4323-a86c-a2cac93aab98",
|
865 |
+
"metadata": {},
|
866 |
+
"outputs": [],
|
867 |
+
"source": []
|
868 |
+
},
|
869 |
+
{
|
870 |
+
"cell_type": "code",
|
871 |
+
"execution_count": null,
|
872 |
+
"id": "83b1729e-5c49-45ea-bb73-89fc31dcd27e",
|
873 |
+
"metadata": {},
|
874 |
+
"outputs": [],
|
875 |
+
"source": []
|
876 |
+
},
|
877 |
+
{
|
878 |
+
"cell_type": "code",
|
879 |
+
"execution_count": null,
|
880 |
+
"id": "010f08e4-f439-42d4-ae71-91a2f0f3d798",
|
881 |
+
"metadata": {},
|
882 |
+
"outputs": [],
|
883 |
+
"source": []
|
884 |
+
},
|
885 |
+
{
|
886 |
+
"cell_type": "code",
|
887 |
+
"execution_count": null,
|
888 |
+
"id": "1ed6774d-ce15-4f1f-af6c-8130ffb6147a",
|
889 |
+
"metadata": {},
|
890 |
+
"outputs": [],
|
891 |
+
"source": []
|
892 |
+
},
|
893 |
+
{
|
894 |
+
"cell_type": "code",
|
895 |
+
"execution_count": null,
|
896 |
+
"id": "d39f5624-ea0a-48c2-87f6-4ca52af85746",
|
897 |
+
"metadata": {},
|
898 |
+
"outputs": [],
|
899 |
+
"source": []
|
900 |
+
},
|
901 |
+
{
|
902 |
+
"cell_type": "code",
|
903 |
+
"execution_count": null,
|
904 |
+
"id": "af545751-1d0d-4615-b1b0-c7c24015525a",
|
905 |
+
"metadata": {},
|
906 |
+
"outputs": [],
|
907 |
+
"source": []
|
908 |
+
},
|
909 |
+
{
|
910 |
+
"cell_type": "code",
|
911 |
+
"execution_count": null,
|
912 |
+
"id": "1f40129c-21a5-4585-9901-f763c55077eb",
|
913 |
+
"metadata": {},
|
914 |
+
"outputs": [],
|
915 |
+
"source": []
|
916 |
+
},
|
917 |
+
{
|
918 |
+
"cell_type": "code",
|
919 |
+
"execution_count": null,
|
920 |
+
"id": "813ec0ca-f2e6-4bcf-b456-40aead7fc864",
|
921 |
+
"metadata": {},
|
922 |
+
"outputs": [],
|
923 |
+
"source": []
|
924 |
+
},
|
925 |
+
{
|
926 |
+
"cell_type": "code",
|
927 |
+
"execution_count": null,
|
928 |
+
"id": "c73f86f2-4ddb-4ab6-a996-f8b73036940e",
|
929 |
+
"metadata": {},
|
930 |
+
"outputs": [],
|
931 |
+
"source": []
|
932 |
+
},
|
933 |
+
{
|
934 |
+
"cell_type": "code",
|
935 |
+
"execution_count": null,
|
936 |
+
"id": "cc21d46c-c173-4010-854c-991507c7fa72",
|
937 |
+
"metadata": {},
|
938 |
+
"outputs": [],
|
939 |
+
"source": []
|
940 |
+
},
|
941 |
+
{
|
942 |
+
"cell_type": "code",
|
943 |
+
"execution_count": null,
|
944 |
+
"id": "264311e4-4444-48b4-94db-d2b055375fad",
|
945 |
+
"metadata": {},
|
946 |
+
"outputs": [],
|
947 |
+
"source": []
|
948 |
+
},
|
949 |
+
{
|
950 |
+
"cell_type": "code",
|
951 |
+
"execution_count": null,
|
952 |
+
"id": "0808a008-5b6b-4c0b-b553-bf2f24190c22",
|
953 |
+
"metadata": {},
|
954 |
+
"outputs": [],
|
955 |
+
"source": []
|
956 |
+
},
|
957 |
+
{
|
958 |
+
"cell_type": "code",
|
959 |
+
"execution_count": null,
|
960 |
+
"id": "f55aed91-d9ce-4c0d-ac91-e8923858b07f",
|
961 |
+
"metadata": {},
|
962 |
+
"outputs": [],
|
963 |
+
"source": []
|
964 |
+
},
|
965 |
+
{
|
966 |
+
"cell_type": "code",
|
967 |
+
"execution_count": null,
|
968 |
+
"id": "9af5b78c-ff5f-497d-8e7d-50cc920daa5c",
|
969 |
+
"metadata": {},
|
970 |
+
"outputs": [],
|
971 |
+
"source": []
|
972 |
+
},
|
973 |
+
{
|
974 |
+
"cell_type": "code",
|
975 |
+
"execution_count": null,
|
976 |
+
"id": "50307ccb-485b-4349-9041-94e11c804372",
|
977 |
+
"metadata": {},
|
978 |
+
"outputs": [],
|
979 |
+
"source": []
|
980 |
+
},
|
981 |
+
{
|
982 |
+
"cell_type": "code",
|
983 |
+
"execution_count": null,
|
984 |
+
"id": "8d1a48a6-a3c2-4725-8c12-4ad4a77af06f",
|
985 |
+
"metadata": {},
|
986 |
+
"outputs": [],
|
987 |
+
"source": []
|
988 |
+
},
|
989 |
+
{
|
990 |
+
"cell_type": "code",
|
991 |
+
"execution_count": null,
|
992 |
+
"id": "93a4c4fc-8c76-4aa9-baf1-419542acd3f4",
|
993 |
+
"metadata": {},
|
994 |
+
"outputs": [],
|
995 |
+
"source": []
|
996 |
+
},
|
997 |
+
{
|
998 |
+
"cell_type": "code",
|
999 |
+
"execution_count": null,
|
1000 |
+
"id": "b04c5443-48b8-4b21-8dd7-ba351fd5475b",
|
1001 |
+
"metadata": {},
|
1002 |
+
"outputs": [],
|
1003 |
+
"source": []
|
1004 |
+
},
|
1005 |
+
{
|
1006 |
+
"cell_type": "code",
|
1007 |
+
"execution_count": null,
|
1008 |
+
"id": "8fe35b8a-0ecd-41b1-93e5-f677db733ab3",
|
1009 |
+
"metadata": {},
|
1010 |
+
"outputs": [],
|
1011 |
+
"source": []
|
1012 |
+
},
|
1013 |
+
{
|
1014 |
+
"cell_type": "code",
|
1015 |
+
"execution_count": null,
|
1016 |
+
"id": "721fa523-789e-4a27-9a2e-1464193244fd",
|
1017 |
+
"metadata": {},
|
1018 |
+
"outputs": [],
|
1019 |
+
"source": []
|
1020 |
+
},
|
1021 |
+
{
|
1022 |
+
"cell_type": "code",
|
1023 |
+
"execution_count": null,
|
1024 |
+
"id": "ef9b5c18-c264-4714-8b3d-5e015e5d89b2",
|
1025 |
+
"metadata": {},
|
1026 |
+
"outputs": [],
|
1027 |
+
"source": []
|
1028 |
+
},
|
1029 |
+
{
|
1030 |
+
"cell_type": "code",
|
1031 |
+
"execution_count": null,
|
1032 |
+
"id": "feef3d76-cad8-4115-ba55-7dc5e99d638a",
|
1033 |
+
"metadata": {},
|
1034 |
+
"outputs": [],
|
1035 |
+
"source": []
|
1036 |
+
},
|
1037 |
+
{
|
1038 |
+
"cell_type": "code",
|
1039 |
+
"execution_count": null,
|
1040 |
+
"id": "807cffe5-39f2-42ae-8135-98c7a86cd88f",
|
1041 |
+
"metadata": {},
|
1042 |
+
"outputs": [],
|
1043 |
+
"source": []
|
1044 |
+
}
|
1045 |
+
],
|
1046 |
+
"metadata": {
|
1047 |
+
"kernelspec": {
|
1048 |
+
"display_name": "Python 3 (ipykernel)",
|
1049 |
+
"language": "python",
|
1050 |
+
"name": "python3"
|
1051 |
+
},
|
1052 |
+
"language_info": {
|
1053 |
+
"codemirror_mode": {
|
1054 |
+
"name": "ipython",
|
1055 |
+
"version": 3
|
1056 |
+
},
|
1057 |
+
"file_extension": ".py",
|
1058 |
+
"mimetype": "text/x-python",
|
1059 |
+
"name": "python",
|
1060 |
+
"nbconvert_exporter": "python",
|
1061 |
+
"pygments_lexer": "ipython3",
|
1062 |
+
"version": "3.10.12"
|
1063 |
+
}
|
1064 |
+
},
|
1065 |
+
"nbformat": 4,
|
1066 |
+
"nbformat_minor": 5
|
1067 |
+
}
|
xlsr-train.py
ADDED
@@ -0,0 +1,354 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
|
3 |
+
from datasets import load_dataset, Audio, concatenate_datasets
|
4 |
+
|
5 |
+
# Set processors (optional)
|
6 |
+
|
7 |
+
num_proc = os.cpu_count()//2
|
8 |
+
num_dataloaders = os.cpu_count()//2
|
9 |
+
|
10 |
+
print(f"Cpu count: {os.cpu_count()}\nNum proc: {num_proc}\nNum dataloaders: {num_dataloaders}")
|
11 |
+
|
12 |
+
# Load datasets
|
13 |
+
|
14 |
+
train = load_dataset()
|
15 |
+
dev = load_dataset()
|
16 |
+
test = load_dataset()
|
17 |
+
|
18 |
+
import unicodedata
|
19 |
+
import re
|
20 |
+
|
21 |
+
def preprocess_text(batch):
|
22 |
+
# Convert to lowercase
|
23 |
+
batch['sentence'] = batch['sentence'].lower()
|
24 |
+
|
25 |
+
# Normalize text
|
26 |
+
batch['sentence'] = unicodedata.normalize('NFKC', batch['sentence'])
|
27 |
+
batch['sentence'] = re.sub(r'[\’\ʻ\ʼ\ʽ\‘]', "'", batch['sentence'])
|
28 |
+
|
29 |
+
# Remove punctuation and special characters
|
30 |
+
batch['sentence'] = re.sub(r'[^\w\s\']', '', batch['sentence'])
|
31 |
+
batch['sentence'] = re.sub(r'_', ' ', batch['sentence'])
|
32 |
+
|
33 |
+
# Remove excessive whitespace
|
34 |
+
batch['sentence'] = ' '.join(batch['sentence'].split())
|
35 |
+
|
36 |
+
return batch
|
37 |
+
|
38 |
+
import librosa
|
39 |
+
import numpy as np
|
40 |
+
|
41 |
+
def get_lens(batch):
|
42 |
+
try:
|
43 |
+
audio_len = librosa.get_duration(y=batch['audio']['array'], sr=batch['audio']['sampling_rate'])
|
44 |
+
except:
|
45 |
+
del batch['audio']
|
46 |
+
batch['audio'] = None
|
47 |
+
audio_len = 0.0
|
48 |
+
transcript_len = len(batch['sentence'])
|
49 |
+
batch['audio_len'] = audio_len
|
50 |
+
batch['transcript_len'] = transcript_len
|
51 |
+
batch['len_ratio'] = float(audio_len)/float(transcript_len)
|
52 |
+
batch['num_feature_vecs'] = int(np.round(audio_len * 1000 / 20))
|
53 |
+
return batch
|
54 |
+
|
55 |
+
transcript_len = len(batch['sentence'])
|
56 |
+
|
57 |
+
batch['audio_len'] = audio_len
|
58 |
+
batch['transcript_len'] = transcript_len
|
59 |
+
batch['len_ratio'] = float(audio_len)/float(transcript_len)
|
60 |
+
batch['num_feature_vecs'] = int(np.round(audio_len * 1000 / 20)) # seconds -> milliseconds, divide by 20 millisecond feature_win_step, round up to nearest int
|
61 |
+
|
62 |
+
return batch
|
63 |
+
|
64 |
+
def data_checks(batch):
|
65 |
+
audio_check = (batch['audio_len']>1.0 and batch['audio_len']<30.0)
|
66 |
+
transcript_check = (batch['transcript_len']>10)
|
67 |
+
|
68 |
+
input_output_ratio = float(batch['num_feature_vecs']) / float(batch['transcript_len'])
|
69 |
+
input_output_ratio_check = input_output_ratio>1.0 # CTC algorithm assumes the input is not shorter than the ouput
|
70 |
+
|
71 |
+
return (audio_check and transcript_check and input_output_ratio_check)
|
72 |
+
|
train = train.map(preprocess_text, num_proc=num_proc)
dev = dev.map(preprocess_text, num_proc=num_proc)

# Compute length statistics; fall back to fewer workers if the multiprocessing map fails
try:
    train = train.map(get_lens, num_proc=num_proc)
except Exception:
    train = train.map(get_lens, num_proc=4)

try:
    dev = dev.map(get_lens, num_proc=num_proc)
except Exception:
    dev = dev.map(get_lens, num_proc=4)

train = train.filter(data_checks, num_proc=num_proc)
dev = dev.filter(data_checks, num_proc=num_proc)

# Drop outliers whose audio-length/transcript-length ratio is more than
# num_std_devs standard deviations away from the split mean
train_mean = np.mean(train['len_ratio'])
train_std = np.std(train['len_ratio'])

dev_mean = np.mean(dev['len_ratio'])
dev_std = np.std(dev['len_ratio'])

num_std_devs = 2
train = train.filter(lambda batch: abs(batch['len_ratio'] - train_mean) <= num_std_devs * train_std, num_proc=num_proc)
dev = dev.filter(lambda batch: abs(batch['len_ratio'] - dev_mean) <= num_std_devs * dev_std, num_proc=num_proc)

print(f"Train hours: {sum(train['audio_len'])/3600}\nDev hours: {sum(dev['audio_len'])/3600}")

train = train.remove_columns(['audio_len', 'transcript_len', 'len_ratio', 'num_feature_vecs'])
dev = dev.remove_columns(['audio_len', 'transcript_len', 'len_ratio', 'num_feature_vecs'])

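# Worked example of the outlier filter (made-up numbers, illustration only):
# if train_mean were 0.08 s per character and train_std 0.02, only utterances
# with len_ratio between 0.04 and 0.12 s per character would be kept.
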
alphabet = None  # define the language's alphabet here, e.g. " 'abcdefghijklmnorstuwyzƙƴɓɗ" for Hausa

alphabet = sorted(set(alphabet))

vocab_dict = {char: idx for idx, char in enumerate(alphabet)}

# Use "|" as the word delimiter instead of a literal space
vocab_dict["|"] = vocab_dict[" "]
del vocab_dict[" "]

vocab_dict["[UNK]"] = len(vocab_dict)
vocab_dict["[PAD]"] = len(vocab_dict)

import json

with open('vocab.json', 'w') as vocab_file:
    json.dump(vocab_dict, vocab_file)

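# Worked example of the vocab construction above on a tiny hypothetical alphabet
# (illustration only; the real vocab comes from `alphabet`). The space slot is
# re-used for the "|" word delimiter and [UNK]/[PAD] are appended at the end,
# which is the same layout as the committed vocab.json.
_toy = sorted(set(" 'ab"))
_toy_vocab = {char: idx for idx, char in enumerate(_toy)}
_toy_vocab["|"] = _toy_vocab.pop(" ")
_toy_vocab["[UNK]"] = len(_toy_vocab)
_toy_vocab["[PAD]"] = len(_toy_vocab)
assert _toy_vocab == {"'": 1, "a": 2, "b": 3, "|": 0, "[UNK]": 4, "[PAD]": 5}
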
from transformers import Wav2Vec2CTCTokenizer

# The tokenizer reads the vocab.json written above from the current directory
tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("./", unk_token="[UNK]", pad_token="[PAD]", word_delimiter_token="|")

from transformers import Wav2Vec2FeatureExtractor

feature_extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0, do_normalize=True, return_attention_mask=True)

from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)

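# Optional sanity check of the processor (a dummy one-second sine wave, purely
# illustrative and not part of the training data): the audio side returns the
# normalised raw waveform, one float per sample, rather than a spectrogram.
_dummy_wave = np.sin(2 * np.pi * 440 * np.arange(16000) / 16000).astype(np.float32)
_dummy_inputs = processor(audio=_dummy_wave, sampling_rate=16000).input_values[0]
print(f"Dummy input_values length: {len(_dummy_inputs)} samples")
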
def prepare_dataset(batch):
    audio = batch["audio"]
    batch["input_values"] = processor(audio=audio["array"], sampling_rate=audio["sampling_rate"]).input_values[0]
    batch["input_length"] = len(batch["input_values"])
    batch["labels"] = processor(text=batch["sentence"]).input_ids
    return batch

try:
    train = train.map(prepare_dataset, remove_columns=train.column_names, num_proc=num_proc)
except Exception:
    train = train.map(prepare_dataset, remove_columns=train.column_names, num_proc=4)

try:
    dev = dev.map(prepare_dataset, remove_columns=dev.column_names, num_proc=num_proc)
except Exception:
    dev = dev.map(prepare_dataset, remove_columns=dev.column_names, num_proc=4)

import torch

from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union

@dataclass
class DataCollatorCTCWithPadding:
    """
    Data collator that will dynamically pad the inputs received.
    Args:
        processor (:class:`~transformers.Wav2Vec2Processor`)
            The processor used for processing the data.
        padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
            Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
            among:
            * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
              sequence is provided).
            * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
              maximum acceptable input length for the model if that argument is not provided.
            * :obj:`False` or :obj:`'do_not_pad'`: No padding (i.e., can output a batch with sequences of
              different lengths).
    """

    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need
        # different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features=input_features,
            padding=self.padding,
            return_tensors="pt",
        )

        labels_batch = self.processor.pad(
            labels=label_features,
            padding=self.padding,
            return_tensors="pt",
        )

        # replace padding with -100 so padded label positions are ignored by the CTC loss
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

        batch["labels"] = labels

        return batch

data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)

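# Optional illustration (not part of the original pipeline): run the collator on
# the first two prepared training examples and inspect the padded tensor shapes.
_demo_batch = data_collator([train[0], train[1]])
print({k: tuple(v.shape) for k, v in _demo_batch.items()})
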
"""# Model Training"""

import evaluate

wer_metric = evaluate.load("wer")
cer_metric = evaluate.load("cer")

import numpy as np

def compute_metrics(pred):
    pred_logits = pred.predictions
    pred_ids = np.argmax(pred_logits, axis=-1)

    # Restore the pad token id where the collator inserted -100 so the labels can be decoded
    pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id

    pred_str = processor.batch_decode(pred_ids)
    label_str = processor.batch_decode(pred.label_ids, group_tokens=False)

    wer = wer_metric.compute(predictions=pred_str, references=label_str)
    cer = cer_metric.compute(predictions=pred_str, references=label_str)

    return {"wer": wer, "cer": cer}

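# Toy illustration of the two metrics (made-up strings, not dataset output):
#   predictions=["abana baffe"], references=["abaana baffe"]
#   -> WER 0.5 (one of two words wrong), CER 1/12 (one character deleted out of twelve)
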
from transformers import Wav2Vec2ForCTC, TrainingArguments, Trainer, EarlyStoppingCallback

model_checkpoint = "facebook/wav2vec2-xls-r-300m"

model = Wav2Vec2ForCTC.from_pretrained(
    model_checkpoint,
    attention_dropout=0.0,
    hidden_dropout=0.0,
    feat_proj_dropout=0.0,
    mask_time_prob=0.05,
    layerdrop=0.0,
    ctc_loss_reduction="mean",
    pad_token_id=processor.tokenizer.pad_token_id,
    vocab_size=len(processor.tokenizer),
)

model.freeze_feature_encoder()

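# Optional check (illustrative, not in the original pipeline): with the CNN
# feature encoder frozen, only the transformer layers and the CTC head remain
# trainable, which this count reflects.
_trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f"Trainable parameters: {_trainable / 1e6:.1f}M")
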
import wandb

# Experiment metadata and hyperparameters; fill these in before running
dataset = None
language = None
sample_hours = None
version = None
batch_size = None
grad_acc = 1
eval_batch_size = batch_size // 2
epochs = None
output_dir = f"{model_checkpoint.split('/')[-1]}-{dataset}-{language}-{sample_hours}hrs-{version}"

wandb.init(
    project="ASR Africa",
    entity="asr-africa-research-team",
    name=output_dir,
)

training_args = TrainingArguments(
    output_dir=output_dir,
    group_by_length=True,
    per_device_train_batch_size=batch_size,
    per_device_eval_batch_size=eval_batch_size,
    gradient_accumulation_steps=grad_acc,
    eval_strategy="epoch",
    logging_strategy="epoch",
    save_strategy="epoch",
    num_train_epochs=epochs,
    gradient_checkpointing=True,
    fp16=True,
    learning_rate=None,
    lr_scheduler_type='linear',
    warmup_ratio=None,
    save_total_limit=2,
    load_best_model_at_end=True,
    metric_for_best_model="wer",
    greater_is_better=False,
    optim='adamw_torch',
    push_to_hub=True,
    hub_model_id=f"asr-africa/{output_dir}",
    hub_private_repo=True,
    dataloader_num_workers=num_dataloaders,
)

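# Note on the arithmetic (illustrative; batch_size and grad_acc above are
# placeholders): the effective batch size per optimizer step is
#   per_device_train_batch_size * gradient_accumulation_steps * number_of_GPUs,
# e.g. batch_size=16 and grad_acc=1 on a single GPU gives 16 examples per update.
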
trainer = Trainer(
    model=model,
    data_collator=data_collator,
    args=training_args,
    compute_metrics=compute_metrics,
    train_dataset=train,
    eval_dataset=dev,
    tokenizer=processor.feature_extractor,
    callbacks=[
        EarlyStoppingCallback(
            early_stopping_patience=10,
            early_stopping_threshold=1e-3
        )
    ],
)

trainer.train()

# Model card metadata for the Hub; fill in before pushing
kwargs = {
    "dataset_tags": "",
    "dataset": "",
    "language": "",
    "model_name": "",
    "finetuned_from": model_checkpoint,
    "tasks": "automatic-speech-recognition",
}

trainer.push_to_hub(**kwargs)

# Build the evaluation set from the held-out split plus additional test datasets,
# then apply the same preprocessing and filtering pipeline as the training data
other_test_dataset_1 = load_dataset()
other_test_dataset_2 = load_dataset()

test = concatenate_datasets([test, other_test_dataset_1, other_test_dataset_2]).shuffle(42)

test = test.map(preprocess_text, num_proc=num_proc)

try:
    test = test.map(get_lens, num_proc=num_proc)
except Exception:
    test = test.map(get_lens, num_proc=4)

test = test.filter(data_checks, num_proc=num_proc)

test_mean = np.mean(test['len_ratio'])
test_std = np.std(test['len_ratio'])
num_std_devs = 2
test = test.filter(lambda batch: abs(batch['len_ratio'] - test_mean) <= num_std_devs * test_std, num_proc=num_proc)

print(f"Test hours: {sum(test['audio_len'])/3600}")

test = test.remove_columns(['audio_len', 'transcript_len', 'len_ratio', 'num_feature_vecs'])

try:
    test = test.map(prepare_dataset, remove_columns=test.column_names, num_proc=num_proc)
except Exception:
    test = test.map(prepare_dataset, remove_columns=test.column_names, num_proc=4)

results = trainer.evaluate(eval_dataset=test, metric_key_prefix="test")
print(results)

wandb.log(results)

# Free the datasets' disk cache and GPU memory
train.cleanup_cache_files()
dev.cleanup_cache_files()
test.cleanup_cache_files()

torch.cuda.empty_cache()
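# Minimal inference sketch with the fine-tuned model (illustrative; `clip` is a
# hypothetical 16 kHz float32 waveform that is not defined in this script, so the
# lines stay commented out). With load_best_model_at_end=True, `model` holds the
# best checkpoint at this point.
# inputs = processor(audio=clip, sampling_rate=16000, return_tensors="pt")
# with torch.no_grad():
#     logits = model(inputs.input_values.to(model.device)).logits
# pred_ids = torch.argmax(logits, dim=-1)
# print(processor.batch_decode(pred_ids))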