diff --git "a/nohup.out" "b/nohup.out" new file mode 100644--- /dev/null +++ "b/nohup.out" @@ -0,0 +1,20593 @@ +wandb: Currently logged in as: sanchit-gandhi (use `wandb login --relogin` to force relogin) +wandb: wandb version 0.13.2 is available! To upgrade, please run: +wandb: $ pip install wandb --upgrade +wandb: Tracking run with wandb version 0.12.15 +wandb: Run data is saved locally in /home/sanchitgandhi/flax-wav2vec2-2-bart-large-ls-960h-black-box/wandb/run-20220828_085247-2hx8pk65 +wandb: Run `wandb offline` to turn off syncing. +wandb: Syncing run flax-wav2vec2-2-bart-large-ls-960h-black-box +wandb: ⭐️ View project at https://wandb.ai/sanchit-gandhi/librispeech_960h +wandb: 🚀 View run at https://wandb.ai/sanchit-gandhi/librispeech_960h/runs/2hx8pk65 +INFO:__main__:Training/evaluation parameters FlaxSeq2SeqTrainingArguments( +_n_gpu=-1, +adafactor=False, +adam_beta1=0.9, +adam_beta2=0.999, +adam_epsilon=1e-08, +auto_find_batch_size=False, +bf16=False, +bf16_full_eval=False, +data_seed=None, +dataloader_drop_last=False, +dataloader_num_workers=0, +dataloader_pin_memory=True, +ddp_bucket_cap_mb=None, +ddp_find_unused_parameters=None, +debug=, +deepspeed=None, +disable_tqdm=None, +do_eval=True, +do_predict=True, +do_train=True, +eval_accumulation_steps=None, +eval_delay=0, +eval_steps=10000, +evaluation_strategy=no, +final_generation_max_length=200, +final_generation_num_beams=5, +fp16=False, +fp16_backend=auto, +fp16_full_eval=False, +fp16_opt_level=O1, +fsdp=, +fsdp_min_num_params=0, +fsdp_transformer_layer_cls_to_wrap=None, +full_determinism=False, +generation_length_penalty=1.2, +generation_max_length=200, +generation_num_beams=5, +gradient_accumulation_steps=1, +gradient_checkpointing=True, +greater_is_better=None, +group_by_length=False, +half_precision_backend=auto, +hub_model_id=None, +hub_private_repo=False, +hub_strategy=every_save, +hub_token=, +ignore_data_skip=False, +include_inputs_for_metrics=False, +jit_mode_eval=False, +label_names=None, +label_smoothing_factor=0.0, +learning_rate=0.0001, +length_column_name=length, +load_best_model_at_end=False, +local_rank=-1, +log_level=passive, +log_level_replica=passive, +log_on_each_node=True, +logging_dir=None, +logging_first_step=False, +logging_nan_inf_filter=True, +logging_steps=25, +logging_strategy=steps, +lr_scheduler_type=linear, +matmul_precision=default, +max_grad_norm=1.0, +max_steps=50000, +metric_for_best_model=None, +mp_parameters=, +no_cuda=False, +num_train_epochs=3.0, +optim=adamw_hf, +output_dir=./, +overwrite_output_dir=True, +past_index=-1, +per_device_eval_batch_size=4, +per_device_train_batch_size=8, +precision=full, +predict_with_generate=True, +prediction_loss_only=False, +push_to_hub=True, +push_to_hub_model_id=None, +push_to_hub_organization=None, +push_to_hub_token=, +ray_scope=last, +remove_unused_columns=True, +report_to=None, +resume_from_checkpoint=None, +run_name=None, +save_on_each_node=False, +save_steps=10000, +save_strategy=steps, +save_total_limit=None, +seed=42, +sharded_ddp=, +skip_memory_metrics=True, +sortish_sampler=False, +tf32=None, +torchdynamo=None, +tpu_metrics_debug=False, +tpu_num_cores=None, +use_ipex=False, +use_legacy_prediction_loop=False, +warmup_ratio=0.0, +warmup_steps=500, +weight_decay=0.0, +xpu_backend=None, +) +INFO:__main__:JAX devices: 8, matmul precision: default +Downloading and preparing dataset librispeech_asr/all (download: 57.14 GiB, generated: 59.44 GiB, post-processed: Unknown size, total: 116.59 GiB) to 
/home/sanchitgandhi/cache/huggingface/datasets/librispeech_asr/all/2.1.0/14c8bffddb861b4b3a4fcdff648a56980dbb808f3fc56f5a3d56b18ee88458eb... + Downloading data files: 0% 0/7 [00:00