# Whisper Finetuning script for the NST dataset
# This is a test script for XLA on TPU
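#
# xla_spawn.py (assumed here to be the standard Hugging Face Transformers TPU
# launcher) uses torch_xla's multiprocessing spawn to start one training
# process per TPU core (--num_cores=4) and forwards the remaining arguments
# to run_whisper_finetuning.py.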

# Prepend PT_XLA_DEBUG=1 to the command below to enable torch_xla's debug/metrics output.
# PT_XLA_DEBUG=1
python xla_spawn.py --num_cores=4 run_whisper_finetuning.py \
	--model_name_or_path="openai/whisper-small" \
	--output_dir="../whisper-NST-TPU-test4" \
	--overwrite_output_dir=True \
	--language="Norwegian" \
	--task="transcribe" \
	--dataset_name="NbAiLab/NST" \
	--dataset_config="no-close" \
	--do_train=True \
	--do_eval=False \
	--audio_column_name="audio" \
	--text_column_name="text" \
	--per_device_train_batch_size=16 \
	--per_device_eval_batch_size=16 \
	--learning_rate=2e-5 \
	--warmup_steps=0 \
	--max_steps=20 \
	--gradient_checkpointing=True \
	--gradient_accumulation_steps=1 \
	--group_by_length=False \
	--evaluation_strategy="steps" \
	--save_steps=20 \
	--eval_steps=1 \
	--max_eval_samples=2 \
	--logging_steps=100 \
	--load_best_model_at_end=False \
	--metric_for_best_model="wer" \
	--greater_is_better=False \
	--report_to="tensorboard" \
	--predict_with_generate=False \
	--generation_max_length=225 \
	--print_training_arguments=True \
	--xla=True \
	--push_to_hub=True
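
# Note: --push_to_hub=True assumes you are authenticated with the Hugging Face
# Hub (e.g. via `huggingface-cli login`) and have write access to the target repo.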