diff --git "a/wandb/run-20220504_095140-cwhobv6l/files/output.log" "b/wandb/run-20220504_095140-cwhobv6l/files/output.log" new file mode 100644--- /dev/null +++ "b/wandb/run-20220504_095140-cwhobv6l/files/output.log" @@ -0,0 +1,2987 @@ +wandb: WARNING Config item 'output_dir' was locked by 'sweep' (ignored update). +wandb: WARNING Config item 'evaluation_strategy' was locked by 'sweep' (ignored update). +wandb: WARNING Config item 'per_device_train_batch_size' was locked by 'sweep' (ignored update). +wandb: WARNING Config item 'per_device_eval_batch_size' was locked by 'sweep' (ignored update). +wandb: WARNING Config item 'gradient_accumulation_steps' was locked by 'sweep' (ignored update). +wandb: WARNING Config item 'learning_rate' was locked by 'sweep' (ignored update). +wandb: WARNING Config item 'num_train_epochs' was locked by 'sweep' (ignored update). +wandb: WARNING Config item 'warmup_steps' was locked by 'sweep' (ignored update). +wandb: WARNING Config item 'logging_steps' was locked by 'sweep' (ignored update). +wandb: WARNING Config item 'save_steps' was locked by 'sweep' (ignored update). +wandb: WARNING Config item 'eval_steps' was locked by 'sweep' (ignored update). +wandb: WARNING Config item 'metric_for_best_model' was locked by 'sweep' (ignored update). +wandb: WARNING Config item 'greater_is_better' was locked by 'sweep' (ignored update). +wandb: WARNING Config item 'generation_max_length' was locked by 'sweep' (ignored update). +wandb: WARNING Config item 'generation_num_beams' was locked by 'sweep' (ignored update). + 0%| | 0/19440 [00:00 + main() + File "/home/sanchit_huggingface_co/xtreme_s_xlsr_2_bart_covost2_fr_en_2/run_xtreme_s.py", line 874, in main + train_result = trainer.train(resume_from_checkpoint=checkpoint) + File "/home/sanchit_huggingface_co/transformers/src/transformers/trainer.py", line 1524, in train + self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval) + File "/home/sanchit_huggingface_co/transformers/src/transformers/trainer.py", line 1647, in _maybe_log_save_evaluate + self.log(logs) + File "/home/sanchit_huggingface_co/transformers/src/transformers/trainer.py", line 1960, in log + self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs) + File "/home/sanchit_huggingface_co/transformers/src/transformers/trainer_callback.py", line 381, in on_log + return self.call_event("on_log", args, state, control, logs=logs) + File "/home/sanchit_huggingface_co/transformers/src/transformers/trainer_callback.py", line 388, in call_event + result = getattr(callback, event)( + File "/home/sanchit_huggingface_co/transformers/src/transformers/integrations.py", line 658, in on_log + self._wandb.log({**logs, "train/global_step": state.global_step}) + File "/home/sanchit_huggingface_co/gcp/lib/python3.9/site-packages/wandb/sdk/wandb_run.py", line 1349, in log + self.history._row_add(data) + File "/home/sanchit_huggingface_co/gcp/lib/python3.9/site-packages/wandb/sdk/wandb_history.py", line 44, in _row_add + self._flush() + File "/home/sanchit_huggingface_co/gcp/lib/python3.9/site-packages/wandb/sdk/wandb_history.py", line 59, in _flush + self._callback(row=self._data, step=self._step) + File "/home/sanchit_huggingface_co/gcp/lib/python3.9/site-packages/wandb/sdk/wandb_run.py", line 1027, in _history_callback + self._backend.interface.publish_history( + File "/home/sanchit_huggingface_co/gcp/lib/python3.9/site-packages/wandb/sdk/interface/interface.py", line 506, in publish_history + 
+    self._publish_history(history)
+  File "/home/sanchit_huggingface_co/gcp/lib/python3.9/site-packages/wandb/sdk/interface/interface_shared.py", line 59, in _publish_history
+    self._publish(rec)
+  File "/home/sanchit_huggingface_co/gcp/lib/python3.9/site-packages/wandb/sdk/interface/interface_queue.py", line 49, in _publish
+    raise Exception("The wandb backend process has shutdown")
+Exception: The wandb backend process has shutdown
\ No newline at end of file