distilbert-base-uncased-finetuned-emotion is not a directory

#8
by seobak - opened

from transformers import Trainer, TrainingArguments

batch_size = 64
logging_steps = len(emotions_encoded["train"]) // batch_size
model_name = f"{model_ckpt}-finetuned-emotion"
training_args = TrainingArguments(output_dir=model_name,
num_train_epochs=2,
learning_rate=2e-5,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
weight_decay=0.01,
evaluation_strategy="epoch",
disable_tqdm=False,
logging_steps=logging_steps,
push_to_hub=True,
save_strategy="epoch",
load_best_model_at_end=True,
log_level="error")

from transformers import Trainer

trainer = Trainer(model=model, args=training_args,
compute_metrics=compute_metrics,
train_dataset=emotions_encoded["train"],
eval_dataset=emotions_encoded["validation"],
tokenizer=tokenizer)
trainer.train()


FailedPreconditionError Traceback (most recent call last)
Cell In[76], line 8
1 from transformers import Trainer
3 trainer = Trainer(model=model, args=training_args,
4 compute_metrics=compute_metrics,
5 train_dataset=emotions_encoded["train"],
6 eval_dataset=emotions_encoded["validation"],
7 tokenizer=tokenizer)
----> 8 trainer.train()

File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\transformers\trainer.py:1534, in Trainer.train(self, resume_from_checkpoint, trial, ignore_keys_for_eval, **kwargs)
1531 try:
1532 # Disable progress bars when uploading models during checkpoints to avoid polluting stdout
1533 hf_hub_utils.disable_progress_bars()
-> 1534 return inner_training_loop(
1535 args=args,
1536 resume_from_checkpoint=resume_from_checkpoint,
1537 trial=trial,
1538 ignore_keys_for_eval=ignore_keys_for_eval,
1539 )
1540 finally:
1541 hf_hub_utils.enable_progress_bars()

File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\transformers\trainer.py:1778, in Trainer._inner_training_loop(self, batch_size, args, resume_from_checkpoint, trial, ignore_keys_for_eval)
1775 self._globalstep_last_logged = self.state.global_step
1776 model.zero_grad()
-> 1778 self.control = self.callback_handler.on_train_begin(args, self.state, self.control)
1780 # Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.
1781 if not args.ignore_data_skip:

File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\transformers\trainer_callback.py:370, in CallbackHandler.on_train_begin(self, args, state, control)
368 def on_train_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
369 control.should_training_stop = False
--> 370 return self.call_event("on_train_begin", args, state, control)

File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\transformers\trainer_callback.py:414, in CallbackHandler.call_event(self, event, args, state, control, **kwargs)
412 def call_event(self, event, args, state, control, **kwargs):
413 for callback in self.callbacks:
--> 414 result = getattr(callback, event)(
415 args,
416 state,
417 control,
418 model=self.model,
419 tokenizer=self.tokenizer,
420 optimizer=self.optimizer,
421 lr_scheduler=self.lr_scheduler,
422 train_dataloader=self.train_dataloader,
423 eval_dataloader=self.eval_dataloader,
424 **kwargs,
425 )
426 # A Callback can skip the return of control if it doesn't change it.
427 if result is not None:

File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\transformers\integrations\integration_utils.py:628, in TensorBoardCallback.on_train_begin(self, args, state, control, **kwargs)
625 log_dir = os.path.join(args.logging_dir, trial_name)
627 if self.tb_writer is None:
--> 628 self._init_summary_writer(args, log_dir)
630 if self.tb_writer is not None:
631 self.tb_writer.add_text("args", args.to_json_string())

File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\transformers\integrations\integration_utils.py:614, in TensorBoardCallback._init_summary_writer(self, args, log_dir)
612 log_dir = log_dir or args.logging_dir
613 if self._SummaryWriter is not None:
--> 614 self.tb_writer = self._SummaryWriter(log_dir=log_dir)

File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\utils\tensorboard\writer.py:247, in SummaryWriter.init(self, log_dir, comment, purge_step, max_queue, flush_secs, filename_suffix)
244 # Initialize the file writers, but they can be cleared out on close
245 # and recreated later as needed.
246 self.file_writer = self.all_writers = None
--> 247 self._get_file_writer()
249 # Create default bins for histograms, see generate_testdata.py in tensorflow/tensorboard
250 v = 1e-12

File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\utils\tensorboard\writer.py:277, in SummaryWriter._get_file_writer(self)
275 """Returns the default FileWriter instance. Recreates it if closed."""
276 if self.all_writers is None or self.file_writer is None:
--> 277 self.file_writer = FileWriter(
278 self.log_dir, self.max_queue, self.flush_secs, self.filename_suffix
279 )
280 self.all_writers = {self.file_writer.get_logdir(): self.file_writer}
281 if self.purge_step is not None:

File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\utils\tensorboard\writer.py:76, in FileWriter.init(self, log_dir, max_queue, flush_secs, filename_suffix)
71 # Sometimes PosixPath is passed in and we need to coerce it to
72 # a string in all cases
73 # TODO: See if we can remove this in the future if we are
74 # actually the ones passing in a PosixPath
75 log_dir = str(log_dir)
---> 76 self.event_writer = EventFileWriter(
77 log_dir, max_queue, flush_secs, filename_suffix
78 )

File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\tensorboard\summary\writer\event_file_writer.py:72, in EventFileWriter.init(self, logdir, max_queue_size, flush_secs, filename_suffix)
57 """Creates a EventFileWriter and an event file to write to.
58
59 On construction the summary writer creates a new event file in logdir.
(...)
69 pending events and summaries to disk.
70 """
71 self._logdir = logdir
---> 72 tf.io.gfile.makedirs(logdir)
73 self._file_name = (
74 os.path.join(
75 logdir,
(...)
84 + filename_suffix
85 ) # noqa E128
86 self._general_file_writer = tf.io.gfile.GFile(self._file_name, "wb")

File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\tensorflow\python\lib\io\file_io.py:513, in recursive_create_dir_v2(path)
501 @tf_export("io.gfile.makedirs")
502 def recursive_create_dir_v2(path):
503 """Creates a directory and all parent/intermediate directories.
504
505 It succeeds if path already exists and is writable.
(...)
511 errors.OpError: If the operation fails.
512 """
--> 513 _pywrap_file_io.RecursivelyCreateDir(compat.path_to_bytes(path))

FailedPreconditionError: distilbert-base-uncased-finetuned-emotion is not a directory

An error occurred saying the directory does not exist. How do I solve this? help me plz

Sign up or log in to comment