jraramhoej committed on
Commit 9612251
1 Parent(s): dc4f202

update model card README.md

Files changed (2)
  1. README.md +78 -0
  2. fine-tune-whisper-streaming.ipynb +39 -20
README.md ADDED
@@ -0,0 +1,78 @@
+ ---
+ license: apache-2.0
+ tags:
+ - whisper-event
+ - generated_from_trainer
+ datasets:
+ - mozilla-foundation/common_voice_11_0
+ metrics:
+ - wer
+ model-index:
+ - name: Whisper Small Lithuanian and Serbian sequentially trained
+   results:
+   - task:
+       name: Automatic Speech Recognition
+       type: automatic-speech-recognition
+     dataset:
+       name: Common Voice 11.0
+       type: mozilla-foundation/common_voice_11_0
+       config: sr
+       split: test
+       args: sr
+     metrics:
+     - name: Wer
+       type: wer
+       value: 35.613112100364226
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # Whisper Small Lithuanian and Serbian sequentially trained
+
+ This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the Common Voice 11.0 dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.6311
+ - Wer: 35.6131
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 1e-05
+ - train_batch_size: 64
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_steps: 500
+ - training_steps: 2000
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Wer     |
+ |:-------------:|:-----:|:----:|:---------------:|:-------:|
+ | 0.0003        | 49.01 | 1000 | 0.6035          | 26.7908 |
+ | 0.0002        | 99.01 | 2000 | 0.6311          | 35.6131 |
+
+
+ ### Framework versions
+
+ - Transformers 4.26.0.dev0
+ - Pytorch 1.13.0+cu117
+ - Datasets 2.7.1.dev0
+ - Tokenizers 0.13.2
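
For context, the hyperparameters listed in the new model card correspond to a standard `Seq2SeqTrainingArguments` setup as used in the Whisper fine-tuning notebooks. The sketch below is illustrative rather than taken from this commit: `output_dir` is an assumed name, and the evaluation settings are inferred from the 1000/2000-step rows in the results table.

```python
from transformers import Seq2SeqTrainingArguments

# Illustrative mapping of the card's hyperparameters onto Seq2SeqTrainingArguments.
# Adam betas/epsilon, seed=42 and the linear scheduler are the library defaults,
# which match the values listed in the card.
training_args = Seq2SeqTrainingArguments(
    output_dir="./whisper-small-lt-sr-v2",  # assumption, not from the commit
    learning_rate=1e-5,                     # learning_rate: 1e-05
    per_device_train_batch_size=64,         # train_batch_size: 64
    per_device_eval_batch_size=8,           # eval_batch_size: 8
    warmup_steps=500,                       # lr_scheduler_warmup_steps: 500
    max_steps=2000,                         # training_steps: 2000
    fp16=True,                              # mixed_precision_training: Native AMP
    evaluation_strategy="steps",
    eval_steps=1000,                        # eval rows appear at steps 1000 and 2000
    predict_with_generate=True,             # WER is computed from generated transcripts
)
```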
fine-tune-whisper-streaming.ipynb CHANGED
@@ -1215,7 +1215,7 @@
    },
    {
     "cell_type": "code",
-    "execution_count": 24,
+    "execution_count": null,
     "id": "95737cda-c5dd-4887-a4d0-dfcb0d61d977",
     "metadata": {},
     "outputs": [
@@ -1230,28 +1230,47 @@
     "tokenizer config file saved in ./tokenizer_config.json\n",
     "Special tokens file saved in ./special_tokens_map.json\n",
     "added tokens file saved in ./added_tokens.json\n",
-    "Several commits (2) will be pushed upstream.\n",
-    "The progress bars may be unreliable.\n",
-    "fatal: unable to access 'https://huggingface.co/jraramhoej/whisper-small-lt-sr-v2/': The requested URL returned error: 502\n",
-    "\n"
+    "Several commits (3) will be pushed upstream.\n",
+    "The progress bars may be unreliable.\n"
     ]
    },
    {
-    "ename": "OSError",
-    "evalue": "fatal: unable to access 'https://huggingface.co/jraramhoej/whisper-small-lt-sr-v2/': The requested URL returned error: 502\n",
-    "output_type": "error",
-    "traceback": [
-     "---------------------------------------------------------------------------",
-     "CalledProcessError Traceback (most recent call last)",
-     "~/.local/lib/python3.8/site-packages/huggingface_hub/repository.py in git_push(self, upstream, blocking, auto_lfs_prune)\n 1206 if return_code:\n-> 1207 raise subprocess.CalledProcessError(\n 1208 return_code, process.args, output=stdout, stderr=stderr\n",
-     "CalledProcessError: Command '['git', 'push', '--set-upstream', 'origin', 'main']' returned non-zero exit status 128.",
-     "\nDuring handling of the above exception, another exception occurred:\n",
-     "OSError Traceback (most recent call last)",
-     "<ipython-input-24-ce0a97cc110b> in <module>\n----> 1 trainer.push_to_hub(**kwargs)\n",
-     "~/.local/lib/python3.8/site-packages/transformers/trainer.py in push_to_hub(self, commit_message, blocking, **kwargs)\n 3489 self.push_in_progress = None\n 3490 \n-> 3491 git_head_commit_url = self.repo.push_to_hub(\n 3492 commit_message=commit_message, blocking=blocking, auto_lfs_prune=True\n 3493 )\n",
-     "~/.local/lib/python3.8/site-packages/huggingface_hub/repository.py in push_to_hub(self, commit_message, blocking, clean_ok, auto_lfs_prune)\n 1430 self.git_add(auto_lfs_track=True)\n 1431 self.git_commit(commit_message)\n-> 1432 return self.git_push(\n 1433 upstream=f\"origin {self.current_branch}\",\n 1434 blocking=blocking,\n",
-     "~/.local/lib/python3.8/site-packages/huggingface_hub/repository.py in git_push(self, upstream, blocking, auto_lfs_prune)\n 1210 \n 1211 except subprocess.CalledProcessError as exc:\n-> 1212 raise EnvironmentError(exc.stderr)\n 1213 \n 1214 if not blocking:\n",
-     "OSError: fatal: unable to access 'https://huggingface.co/jraramhoej/whisper-small-lt-sr-v2/': The requested URL returned error: 502\n"
-    ]
+    "data": {
+     "application/vnd.jupyter.widget-view+json": {
+      "model_id": "6b53c49cc54e4a3f88da039b5a18c439",
+      "version_major": 2,
+      "version_minor": 0
+     },
+     "text/plain": [
+      "Upload file pytorch_model.bin: 0%| | 32.0k/922M [00:00<?, ?B/s]"
+     ]
+    },
+    "metadata": {},
+    "output_type": "display_data"
+   },
+   {
+    "data": {
+     "application/vnd.jupyter.widget-view+json": {
+      "model_id": "de9a0ebf1bf845229bdf5a78593d4852",
+      "version_major": 2,
+      "version_minor": 0
+     },
+     "text/plain": [
+      "Upload file runs/Dec15_14-18-11_129-213-18-196/events.out.tfevents.1671113929.129-213-18-196.4128231.0: 100%|#…"
+     ]
+    },
+    "metadata": {},
+    "output_type": "display_data"
+   },
+   {
+    "name": "stderr",
+    "output_type": "stream",
+    "text": [
+     "remote: Scanning LFS files for validity, may be slow... \n",
+     "remote: LFS file scan complete. \n",
+     "To https://huggingface.co/jraramhoej/whisper-small-lt-sr-v2\n",
+     " 36a641a..dc4f202 main -> main\n",
+     "\n"
     ]
    }
   ],
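
In the updated notebook output the push succeeds and the weights land at https://huggingface.co/jraramhoej/whisper-small-lt-sr-v2 (36a641a..dc4f202 on main). Below is a minimal inference sketch for the pushed checkpoint; the repo id is taken from the push URL in the output, while the audio path and the forced Serbian language setting are placeholder assumptions.

```python
from transformers import pipeline

# Minimal sketch for loading the pushed checkpoint; not part of the commit.
asr = pipeline(
    "automatic-speech-recognition",
    model="jraramhoej/whisper-small-lt-sr-v2",
    chunk_length_s=30,  # split long audio into 30 s windows, Whisper's input length
)

# Whisper detects the language by default; force Serbian transcription explicitly.
asr.model.config.forced_decoder_ids = asr.tokenizer.get_decoder_prompt_ids(
    language="sr", task="transcribe"
)

print(asr("sample.wav")["text"])  # placeholder path to a 16 kHz mono recording
```

The 35.61 WER reported in the card metadata refers to the Common Voice 11.0 Serbian test split (config `sr`, split `test`).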