Automatic Speech Recognition
Transformers
4 languages
whisper
whisper-event
Generated from Trainer
Inference Endpoints
marinone94 committed on
Commit 2affbb8
1 Parent(s): ed53c37

force train + eval

run_speech_recognition_seq2seq_streaming.py CHANGED
@@ -346,7 +346,9 @@ def main():
         model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
     else:
         model_args, data_args, training_args = parser.parse_args_into_dataclasses()
-
+    training_args.do_train = True
+    training_args.do_eval = True
+
     # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
     # information sent is the one passed as arguments along with your Python/PyTorch versions.
     send_example_telemetry("run_speech_recognition_seq2seq_streaming", model_args, data_args)
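
The change above hard-codes training and evaluation on, regardless of whether --do_train / --do_eval are passed on the command line. A minimal sketch of why a post-parsing override takes effect, assuming the usual gating pattern of the Transformers example scripts (the scaffolding below is illustrative, not part of this repo):

# Run as: python sketch.py --output_dir ./tmp
# Seq2SeqTrainingArguments is a dataclass, so assigning to do_train / do_eval
# after parsing simply overwrites the parsed values; the example scripts gate
# their phases on these flags, so both phases run even if the CLI flags are omitted.
from transformers import HfArgumentParser, Seq2SeqTrainingArguments

parser = HfArgumentParser(Seq2SeqTrainingArguments)
(training_args,) = parser.parse_args_into_dataclasses()

training_args.do_train = True   # same override as in this commit
training_args.do_eval = True

if training_args.do_train:
    print("train phase enabled")   # trainer.train() in the real script
if training_args.do_eval:
    print("eval phase enabled")    # trainer.evaluate() in the real script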
test_run.sh CHANGED
@@ -10,8 +10,8 @@ python $1run_speech_recognition_seq2seq_streaming.py \
   --max_eval_samples="32" \
   --max_steps="5000" \
   --output_dir="./" \
-  --per_device_train_batch_size="64" \
-  --per_device_eval_batch_size="32" \
+  --per_device_train_batch_size="8" \
+  --per_device_eval_batch_size="4" \
   --logging_steps="25" \
   --learning_rate="1e-5" \
   --warmup_steps="500" \
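
The smaller per-device batch sizes reduce the memory footprint of the test run. Since test_run.sh does not set gradient_accumulation_steps, the Trainer default of 1 applies, so the effective train batch size drops in proportion. A quick check of what the new setting implies, assuming a single-GPU test run (the device count is an assumption, not stated in this repo):

# Effective train batch size implied by the new test_run.sh values.
per_device_train_batch_size = 8     # was 64 before this commit
num_devices = 1                     # assumption: single-GPU test run
gradient_accumulation_steps = 1     # Trainer default; not overridden in test_run.sh
effective_batch_size = per_device_train_batch_size * num_devices * gradient_accumulation_steps
print(effective_batch_size)         # 8, down from 64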