pranay-j committed
Commit 4249f1d
1 Parent(s): 64056db

update model card README.md

Files changed (2)
  1. README.md +81 -0
  2. fine-tune-whisper-streaming.ipynb +58 -6
README.md ADDED
@@ -0,0 +1,81 @@
+ ---
+ language:
+ - hy
+ license: apache-2.0
+ tags:
+ - whisper-event
+ - generated_from_trainer
+ datasets:
+ - mozilla-foundation/common_voice_11_0
+ metrics:
+ - wer
+ model-index:
+ - name: Whisper
+   results:
+   - task:
+       name: Automatic Speech Recognition
+       type: automatic-speech-recognition
+     dataset:
+       name: common_voice_11_0
+       type: mozilla-foundation/common_voice_11_0
+       config: hy-AM
+       split: test
+       args: hy-AM
+     metrics:
+     - name: Wer
+       type: wer
+       value: 39.73684210526316
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # Whisper
+
+ This model is a fine-tuned version of [openai/whisper-large-v2](https://huggingface.co/openai/whisper-large-v2) on the common_voice_11_0 dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.4380
+ - Wer: 39.7368
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 1e-05
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - gradient_accumulation_steps: 2
+ - total_train_batch_size: 16
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_steps: 100
+ - training_steps: 1500
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Wer |
+ |:-------------:|:-----:|:----:|:---------------:|:-------:|
+ | 0.0001 | 34.0 | 1500 | 0.4380 | 39.7368 |
+
+
+ ### Framework versions
+
+ - Transformers 4.26.0.dev0
+ - Pytorch 1.13.1+cu117
+ - Datasets 2.7.1.dev0
+ - Tokenizers 0.13.2
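
The card's "Intended uses & limitations" section is still a placeholder, so here is a hedged transcription sketch for orientation. The repository id and the audio file name are hypothetical placeholders (the commit does not name the target repo), and the clip is assumed to be 16 kHz mono.

```python
import librosa
import torch
from transformers import WhisperForConditionalGeneration, WhisperProcessor

model_id = "your-username/whisper-large-v2-hy"  # hypothetical repo id, replace with the actual checkpoint

processor = WhisperProcessor.from_pretrained(model_id)
model = WhisperForConditionalGeneration.from_pretrained(model_id)

# Load any 16 kHz mono clip; "sample_hy.wav" is a placeholder file name.
audio, _ = librosa.load("sample_hy.wav", sr=16000)

# Force Armenian transcription so the model does not auto-detect the language.
forced_decoder_ids = processor.get_decoder_prompt_ids(language="hy", task="transcribe")

inputs = processor(audio, sampling_rate=16000, return_tensors="pt")
with torch.no_grad():
    predicted_ids = model.generate(inputs.input_features, forced_decoder_ids=forced_decoder_ids)

print(processor.batch_decode(predicted_ids, skip_special_tokens=True)[0])
```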
fine-tune-whisper-streaming.ipynb CHANGED
@@ -908,7 +908,7 @@
  },
  {
  "cell_type": "code",
- "execution_count": null,
+ "execution_count": 23,
  "id": "ee8b7b8e-1c9a-4d77-9137-1778a629e6de",
  "metadata": {},
  "outputs": [
@@ -937,8 +937,8 @@
  "\n",
  " <div>\n",
  " \n",
- " <progress value='1501' max='1500' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
- " [1500/1500 1:57:29, Epoch 34.00/9223372036854775807]\n",
+ " <progress value='1500' max='1500' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
+ " [1500/1500 2:21:47, Epoch 34/9223372036854775807]\n",
  " </div>\n",
  " <table border=\"1\" class=\"dataframe\">\n",
  " <thead>\n",
@@ -946,9 +946,16 @@
  " <th>Step</th>\n",
  " <th>Training Loss</th>\n",
  " <th>Validation Loss</th>\n",
+ " <th>Wer</th>\n",
  " </tr>\n",
  " </thead>\n",
  " <tbody>\n",
+ " <tr>\n",
+ " <td>1500</td>\n",
+ " <td>0.000100</td>\n",
+ " <td>0.437977</td>\n",
+ " <td>39.736842</td>\n",
+ " </tr>\n",
  " </tbody>\n",
  "</table><p>"
  ],
@@ -1787,8 +1794,35 @@
  " \"transformers_version\": \"4.26.0.dev0\",\n",
  " \"use_cache\": false\n",
  "}\n",
- "\n"
+ "\n",
+ "Saving model checkpoint to ./checkpoint-1500\n",
+ "Configuration saved in ./checkpoint-1500/config.json\n",
+ "Model weights saved in ./checkpoint-1500/pytorch_model.bin\n",
+ "Feature extractor saved in ./checkpoint-1500/preprocessor_config.json\n",
+ "tokenizer config file saved in ./checkpoint-1500/tokenizer_config.json\n",
+ "Special tokens file saved in ./checkpoint-1500/special_tokens_map.json\n",
+ "added tokens file saved in ./checkpoint-1500/added_tokens.json\n",
+ "Feature extractor saved in ./preprocessor_config.json\n",
+ "tokenizer config file saved in ./tokenizer_config.json\n",
+ "Special tokens file saved in ./special_tokens_map.json\n",
+ "added tokens file saved in ./added_tokens.json\n",
+ "\n",
+ "\n",
+ "Training completed. Do not forget to share your model on huggingface.co/models =)\n",
+ "\n",
+ "\n",
+ "Loading best model from ./checkpoint-1500 (score: 39.73684210526316).\n"
  ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "TrainOutput(global_step=1500, training_loss=0.03859770913313453, metrics={'train_runtime': 8541.9827, 'train_samples_per_second': 2.81, 'train_steps_per_second': 0.176, 'total_flos': 5.15336070168576e+19, 'train_loss': 0.03859770913313453, 'epoch': 34.0})"
+ ]
+ },
+ "execution_count": 23,
+ "metadata": {},
+ "output_type": "execute_result"
  }
  ],
  "source": [
@@ -1817,7 +1851,7 @@
  },
  {
  "cell_type": "code",
- "execution_count": null,
+ "execution_count": 24,
  "id": "6dd0e310-9b07-4133-ac14-2ed2d7524e22",
  "metadata": {},
  "outputs": [],
@@ -1846,7 +1880,25 @@
  "execution_count": null,
  "id": "95737cda-c5dd-4887-a4d0-dfcb0d61d977",
  "metadata": {},
- "outputs": [],
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Saving model checkpoint to ./\n",
+ "Configuration saved in ./config.json\n",
+ "Model weights saved in ./pytorch_model.bin\n",
+ "Feature extractor saved in ./preprocessor_config.json\n",
+ "tokenizer config file saved in ./tokenizer_config.json\n",
+ "Special tokens file saved in ./special_tokens_map.json\n",
+ "added tokens file saved in ./added_tokens.json\n",
+ "Several commits (2) will be pushed upstream.\n",
+ "The progress bars may be unreliable.\n",
+ "Upload file pytorch_model.bin: 0%| | 1.00/5.75G [00:00<?, ?B/s]\n",
+ "Upload file runs/Dec19_10-11-44_132-145-194-8/events.out.tfevents.1671444716.132-145-194-8.3465423.0: 0%| | 1.00/14.0k [00:00<?, ?B/s]\u001b[A"
+ ]
+ }
+ ],
  "source": [
  "trainer.push_to_hub(**kwargs)"
  ]
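
The run above reports WER as a percentage (39.736842 at step 1500). For context, below is a minimal sketch of the kind of compute_metrics function commonly used in Whisper fine-tuning setups; it is an assumption consistent with the reported values, not code extracted from this notebook, and `tokenizer` stands in for the Whisper tokenizer used during training.

```python
import evaluate

# Assumed setup: `tokenizer` is the Whisper tokenizer used during training.
wer_metric = evaluate.load("wer")

def compute_metrics(pred):
    pred_ids = pred.predictions
    label_ids = pred.label_ids

    # -100 marks padding positions in the labels; restore the pad token before decoding.
    label_ids[label_ids == -100] = tokenizer.pad_token_id

    pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
    label_str = tokenizer.batch_decode(label_ids, skip_special_tokens=True)

    # evaluate's "wer" returns a fraction; multiply by 100 to match the reported 39.74%.
    wer = 100 * wer_metric.compute(predictions=pred_str, references=label_str)
    return {"wer": wer}
```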
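
The hyperparameters in the model card, together with the checkpoint paths and the "Loading best model from ./checkpoint-1500" line in the log, suggest a Seq2SeqTrainingArguments configuration along the lines of the sketch below. The output directory, evaluation cadence, and best-model settings are inferences, not values copied from the commit.

```python
from transformers import Seq2SeqTrainingArguments

training_args = Seq2SeqTrainingArguments(
    output_dir="./",                 # assumed; checkpoints appear under ./checkpoint-1500
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=2,   # total train batch size of 16
    learning_rate=1e-5,
    lr_scheduler_type="linear",
    warmup_steps=100,
    max_steps=1500,
    seed=42,
    fp16=True,                       # "Native AMP" mixed precision
    evaluation_strategy="steps",     # assumed: evaluation and saving at step 1500
    eval_steps=1500,
    save_steps=1500,
    predict_with_generate=True,      # needed so WER can be computed from generated text
    metric_for_best_model="wer",
    greater_is_better=False,
    load_best_model_at_end=True,     # matches "Loading best model from ./checkpoint-1500"
)
```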