SLM_vs_LLM_experiments/max_seq_length_128_experiments/LoRA/google/gemma_7b_LoRA_MAdAiLab/amazon_attrprompt/all_results.json
{
  "epoch": 3.0,
  "eval_accuracy": 0.8998682476943346,
  "eval_f1_macro": 0.8835874008364096,
  "eval_f1_micro": 0.8998682476943346,
  "eval_loss": 0.3677658438682556,
  "eval_runtime": 22.339,
  "eval_samples": 1518,
  "eval_samples_per_second": 67.953,
  "eval_steps_per_second": 2.149,
  "test_accuracy": 0.9077733860342556,
  "test_f1_macro": 0.8916342550206725,
  "test_f1_micro": 0.9077733860342556,
  "test_loss": 0.3374621868133545,
  "test_runtime": 22.7942,
  "test_samples_per_second": 66.596,
  "test_steps_per_second": 2.106,
  "train_loss": 0.490123635007624,
  "train_runtime": 2106.5673,
  "train_samples": 12144,
  "train_samples_per_second": 17.294,
  "train_steps_per_second": 0.541
}
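
The file above follows the flat all_results.json layout that the Hugging Face Trainer writes after train/eval/predict, with split-prefixed keys ("eval_*", "test_*", "train_*"). Below is a minimal sketch for reading such a file and summarizing the headline metrics per split; the file path mirrors the repo path shown above (assumed relative to the repo root), and the summarize helper is a hypothetical name introduced here for illustration.

import json
from pathlib import Path

# Assumed path, matching the directory layout shown above.
RESULTS = Path(
    "SLM_vs_LLM_experiments/max_seq_length_128_experiments/LoRA"
    "/google/gemma_7b_LoRA_MAdAiLab/amazon_attrprompt/all_results.json"
)

def summarize(metrics: dict, split: str) -> str:
    """Format accuracy, macro-F1, and loss for one split ('eval' or 'test')."""
    return (
        f"{split}: acc={metrics[f'{split}_accuracy']:.4f} "
        f"f1_macro={metrics[f'{split}_f1_macro']:.4f} "
        f"loss={metrics[f'{split}_loss']:.4f}"
    )

with RESULTS.open() as fh:
    metrics = json.load(fh)

print(summarize(metrics, "eval"))  # eval: acc=0.8999 f1_macro=0.8836 loss=0.3678
print(summarize(metrics, "test"))  # test: acc=0.9078 f1_macro=0.8916 loss=0.3375

Note that micro-F1 equals accuracy in both splits here, which is expected for single-label multi-class classification, so macro-F1 is the more informative of the two F1 columns when class sizes are imbalanced.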