Add finetuning process configuration to model
spanish_medica_llm.py (+8, -9)
@@ -647,7 +647,6 @@ def configAndRunFineTuning(basemodel, dataset, eval_dataset, tokenizer):
         evaluation_strategy = "steps", # Evaluate the model every logging step
         eval_steps = 50, # Evaluate and save checkpoints every 50 steps
         do_eval = True, # Perform evaluation at the end of training
-        eval_steps=50,
         save_total_limit=2,
         remove_unused_columns = True,
         report_to = None, # Comment this out if you don't want to use weights & baises
@@ -660,16 +659,16 @@ def configAndRunFineTuning(basemodel, dataset, eval_dataset, tokenizer):
         model=basemodel,
         train_dataset = dataset,
         eval_dataset = eval_dataset,
-
-
-
-
-
-
+        peft_config = getLoraConfiguration(),
+        dataset_text_field = "raw_text",
+        max_seq_length = 1024, #512
+        tokenizer = tokenizer,
+        args = training_args,
+        dataset_kwargs={
             "add_special_tokens": False, # We template with special tokens
             "append_concat_token": False, # No need to add additional separator token
-
-
+        },
+        packing=True
     )
     basemodel.config.use_cache = False # silence the warnings. Please re-enable for inference!
     trainer.train()
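For context, here is a minimal sketch of how the fine-tuning setup might look once this change is assembled. It assumes a trl release (roughly 0.7/0.8) in which SFTTrainer still accepts dataset_text_field, max_seq_length, packing and dataset_kwargs as direct keyword arguments, and that getLoraConfiguration() returns a standard peft.LoraConfig; the LoRA hyperparameters and the output_dir below are illustrative, since neither appears in this hunk.

# Sketch only; not part of the commit. Assumes trl ~0.7/0.8 with peft and transformers installed.
from peft import LoraConfig
from transformers import TrainingArguments
from trl import SFTTrainer


def getLoraConfiguration():
    # Illustrative values; the real function is defined elsewhere in spanish_medica_llm.py.
    return LoraConfig(
        r=16,
        lora_alpha=32,
        lora_dropout=0.05,
        bias="none",
        task_type="CAUSAL_LM",
    )


def configAndRunFineTuning(basemodel, dataset, eval_dataset, tokenizer):
    training_args = TrainingArguments(
        output_dir="./results",               # illustrative; not shown in this hunk
        evaluation_strategy="steps",          # evaluate every eval_steps
        eval_steps=50,
        do_eval=True,
        save_total_limit=2,                   # keep only the two most recent checkpoints
        remove_unused_columns=True,
        report_to=None,                       # comment this out if you don't want W&B logging
    )

    trainer = SFTTrainer(
        model=basemodel,
        train_dataset=dataset,
        eval_dataset=eval_dataset,
        peft_config=getLoraConfiguration(),   # train LoRA adapters instead of all weights
        dataset_text_field="raw_text",        # dataset column holding the raw text
        max_seq_length=1024,
        tokenizer=tokenizer,
        args=training_args,
        dataset_kwargs={
            "add_special_tokens": False,      # the template already inserts special tokens
            "append_concat_token": False,     # no extra separator between packed samples
        },
        packing=True,                         # pack samples into full-length sequences
    )

    basemodel.config.use_cache = False        # silence warnings; re-enable for inference
    trainer.train()

With packing=True, trl concatenates tokenized samples into fixed-length blocks of max_seq_length, which is why the extra separator and special tokens are disabled in dataset_kwargs.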