AndrewMcDowell committed
Commit 6a0b76e • 1 Parent(s): 17227ca

Training in progress, step 500

.ipynb_checkpoints/run-checkpoint.sh CHANGED
@@ -3,11 +3,12 @@ python run_speech_recognition_ctc_bnb.py \
  --model_name_or_path="facebook/wav2vec2-xls-r-1b" \
  --dataset_config_name="ar" \
  --output_dir="./" \
- --num_train_epochs="20" \
- --per_device_train_batch_size="32" \
+ --overwrite_output_dir \
+ --num_train_epochs="10" \
+ --per_device_train_batch_size="16" \
  --per_device_eval_batch_size="8" \
  --gradient_accumulation_steps="4" \
- --learning_rate="1.5e-4" \
+ --learning_rate="6.5e-5" \
  --warmup_steps="2000" \
  --length_column_name="input_length" \
  --evaluation_strategy="steps" \
@@ -30,4 +31,5 @@ python run_speech_recognition_ctc_bnb.py \
  --fp16 \
  --group_by_length \
  --do_train --do_eval \
- --push_to_hub
+ --push_to_hub
+
previous_runs/.ipynb_checkpoints/run-checkpoint.sh ADDED
@@ -0,0 +1,35 @@
+ python run_speech_recognition_ctc_bnb.py \
+ --dataset_name="mozilla-foundation/common_voice_8_0" \
+ --model_name_or_path="facebook/wav2vec2-xls-r-1b" \
+ --dataset_config_name="ar" \
+ --output_dir="./" \
+ --num_train_epochs="20" \
+ --per_device_train_batch_size="32" \
+ --per_device_eval_batch_size="8" \
+ --gradient_accumulation_steps="4" \
+ --learning_rate="1.5e-4" \
+ --warmup_steps="2000" \
+ --length_column_name="input_length" \
+ --evaluation_strategy="steps" \
+ --text_column_name="sentence" \
+ --chars_to_ignore , ? . ! \- \; \: \" " % ' " � — ' … – \
+ --save_steps="500" \
+ --eval_steps="500" \
+ --logging_steps="100" \
+ --layerdrop="0.0" \
+ --activation_dropout="0.1" \
+ --save_total_limit="4" \
+ --freeze_feature_encoder \
+ --feat_proj_dropout="0.0" \
+ --mask_time_prob="0.75" \
+ --mask_time_length="10" \
+ --mask_feature_prob="0.25" \
+ --mask_feature_length="64" \
+ --gradient_checkpointing \
+ --use_auth_token \
+ --fp16 \
+ --group_by_length \
+ --do_train --do_eval \
+ --push_to_hub
+
+ # Poor performance. Stopped at {'eval_loss': 1.338714361190796, 'eval_wer': 0.897404591449766, 'eval_runtime': 422.6845, 'eval_samples_per_second': 24.576, 'eval_steps_per_second': 3.073, 'epoch': 10.07}
previous_runs/run.sh ADDED
@@ -0,0 +1,35 @@
+ python run_speech_recognition_ctc_bnb.py \
+ --dataset_name="mozilla-foundation/common_voice_8_0" \
+ --model_name_or_path="facebook/wav2vec2-xls-r-1b" \
+ --dataset_config_name="ar" \
+ --output_dir="./" \
+ --num_train_epochs="20" \
+ --per_device_train_batch_size="32" \
+ --per_device_eval_batch_size="8" \
+ --gradient_accumulation_steps="4" \
+ --learning_rate="1.5e-4" \
+ --warmup_steps="2000" \
+ --length_column_name="input_length" \
+ --evaluation_strategy="steps" \
+ --text_column_name="sentence" \
+ --chars_to_ignore , ? . ! \- \; \: \" " % ' " � — ' … – \
+ --save_steps="500" \
+ --eval_steps="500" \
+ --logging_steps="100" \
+ --layerdrop="0.0" \
+ --activation_dropout="0.1" \
+ --save_total_limit="4" \
+ --freeze_feature_encoder \
+ --feat_proj_dropout="0.0" \
+ --mask_time_prob="0.75" \
+ --mask_time_length="10" \
+ --mask_feature_prob="0.25" \
+ --mask_feature_length="64" \
+ --gradient_checkpointing \
+ --use_auth_token \
+ --fp16 \
+ --group_by_length \
+ --do_train --do_eval \
+ --push_to_hub
+
+ # Poor performance. Stopped at {'eval_loss': 1.338714361190796, 'eval_wer': 0.897404591449766, 'eval_runtime': 422.6845, 'eval_samples_per_second': 24.576, 'eval_steps_per_second': 3.073, 'epoch': 10.07}
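
Note on the --chars_to_ignore argument in the scripts above: in the Hugging Face CTC example scripts, this list of punctuation is typically joined into a regex and stripped from the "sentence" column before the vocabulary is built. A minimal sketch of that preprocessing step follows; the variable and function names are illustrative and may differ from what run_speech_recognition_ctc_bnb.py actually uses.

```python
import re

# Characters passed via --chars_to_ignore in the scripts above.
chars_to_ignore = [",", "?", ".", "!", "-", ";", ":", '"', "“", "%", "‘", "”", "�", "—", "’", "…", "–"]

# Build a character-class regex from the ignored characters (names are illustrative).
chars_to_ignore_regex = f"[{re.escape(''.join(chars_to_ignore))}]"

def remove_special_characters(sentence: str) -> str:
    # Drop ignored punctuation and lower-case, mirroring the typical
    # transcript preprocessing in the HF CTC examples.
    return re.sub(chars_to_ignore_regex, "", sentence).lower() + " "

print(remove_special_characters("Hello, world!"))  # -> "hello world "
```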
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6813b2b0872c64e7a3717070cac4d515a66932195b3780d1a534af395051a571
+ oid sha256:72110b18b9f181a1796af0610a022c116fbc3b53316387df30a797bd09e691ad
  size 3850799473
run.sh CHANGED
@@ -3,11 +3,12 @@ python run_speech_recognition_ctc_bnb.py \
  --model_name_or_path="facebook/wav2vec2-xls-r-1b" \
  --dataset_config_name="ar" \
  --output_dir="./" \
- --num_train_epochs="20" \
- --per_device_train_batch_size="32" \
+ --overwrite_output_dir \
+ --num_train_epochs="10" \
+ --per_device_train_batch_size="16" \
  --per_device_eval_batch_size="8" \
  --gradient_accumulation_steps="4" \
- --learning_rate="1.5e-4" \
+ --learning_rate="6.5e-5" \
  --warmup_steps="2000" \
  --length_column_name="input_length" \
  --evaluation_strategy="steps" \
@@ -30,4 +31,5 @@ python run_speech_recognition_ctc_bnb.py \
  --fp16 \
  --group_by_length \
  --do_train --do_eval \
- --push_to_hub
+ --push_to_hub
+
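
Note on this change to run.sh: it adds --overwrite_output_dir, halves the epoch count (from 20 to 10) and the per-device train batch size (from 32 to 16), and lowers the learning rate from 1.5e-4 to 6.5e-5. With gradient_accumulation_steps unchanged at 4, the effective batch size per optimizer step drops accordingly; a quick sanity check, assuming a single GPU (multiply by the device count otherwise):

```python
# Effective batch size per optimizer step, before and after this commit,
# assuming a single training device.
old_effective = 32 * 4   # per_device_train_batch_size * gradient_accumulation_steps = 128
new_effective = 16 * 4   # = 64

old_lr, new_lr = 1.5e-4, 6.5e-5
print(old_effective, new_effective)           # 128 64
print(f"lr scaled by {new_lr / old_lr:.2f}")  # lr scaled by 0.43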
special_tokens_map.json CHANGED
@@ -1 +1 @@
- {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:76e7d1cf613331f4d89062dcf3e088ac0c1ea8e4317d97dfec009e3fad701c1e
+ oid sha256:dd4fb4cf661cbec66ff39edfd085a48937505029ebdfd04e974b45d65bbc7c64
  size 2991