SLM_vs_LLM_experiments/max_seq_length_128_experiments/LoRA/google/gemma_7b_LoRA_coastalcph/lex_glue_ledgar/all_results.json
{
  "epoch": 3.0,
  "eval_accuracy": 0.8662,
  "eval_f1_macro": 0.7935499844173846,
  "eval_f1_micro": 0.8662,
  "eval_loss": 0.5041437745094299,
  "eval_runtime": 150.7404,
  "eval_samples": 10000,
  "eval_samples_per_second": 66.339,
  "eval_steps_per_second": 2.076,
  "test_accuracy": 0.8696,
  "test_f1_macro": 0.7903063371841208,
  "test_f1_micro": 0.8696,
  "test_loss": 0.49892714619636536,
  "test_runtime": 151.1908,
  "test_samples_per_second": 66.142,
  "test_steps_per_second": 2.07,
  "train_loss": 0.4508466554853651,
  "train_runtime": 15613.1858,
  "train_samples": 60000,
  "train_samples_per_second": 11.529,
  "train_steps_per_second": 0.36
}