File size: 1,313 Bytes
c9a10ac
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
#!/usr/bin/env bash
#
# Fine-tune facebook/wav2vec2-xls-r-300m for Swedish phoneme recognition (CTC)
# on the Common Voice "sv-SE" split, distributed across 2 GPUs.
#
# Requires:
#   - run_speech_recognition_ctc.py (HF transformers example) in the CWD
#   - a pre-built tokenizer at ./created_tokenizer_from_cv_sv
#   - HF auth credentials (--use_auth_token / --push_to_hub)
#
# NOTE(review): `python -m torch.distributed.launch` is deprecated in recent
# PyTorch in favor of `torchrun`; kept as-is here — confirm against the
# installed torch version before migrating.
# NOTE(review): `--freeze_feature_extractor` was renamed
# `--freeze_feature_encoder` in newer transformers — verify which one the
# pinned version accepts.

set -euo pipefail

python -m torch.distributed.launch \
  --nproc_per_node 2 run_speech_recognition_ctc.py \
  --dataset_name="common_voice" \
  --model_name_or_path="facebook/wav2vec2-xls-r-300m" \
  --tokenizer_name_or_path="./created_tokenizer_from_cv_sv" \
  --dataset_config_name="sv-SE" \
  --phoneme_language="sv" \
  --output_dir="./wav2vec2-xls-r-300m-phoneme-sv" \
  --overwrite_output_dir \
  --num_train_epochs="100" \
  --per_device_train_batch_size="16" \
  --gradient_accumulation_steps="1" \
  --learning_rate="5e-4" \
  --warmup_steps="500" \
  --evaluation_strategy="steps" \
  --text_column_name="sentence" \
  --pad_token="<pad>" \
  --unk_token="<unk>" \
  --save_steps="100" \
  --eval_steps="100" \
  --logging_steps="1" \
  --layerdrop="0.0" \
  --save_total_limit="3" \
  --freeze_feature_extractor \
  --chars_to_ignore , "?" . "!" - ";" ":" '"' "“" "%" "‘" "”" "�" \
  --mask_time_prob="0.3" \
  --mask_time_length="10" \
  --mask_feature_prob="0.1" \
  --mask_feature_length="64" \
  --fp16 \
  --group_by_length \
  --gradient_checkpointing \
  --use_auth_token \
  --do_train \
  --do_eval \
  --push_to_hub