SLM_vs_LLM_experiments/max_seq_length_128_experiments/LoRA/Qwen/Qwen1.5_7B_LoRA_MAdAiLab/twitter_disaster/eval_results.json
{
    "epoch": 3.0,
    "eval_accuracy": 0.7766544117647058,
    "eval_f1_macro": 0.7450627015924902,
    "eval_f1_micro": 0.7766544117647058,
    "eval_loss": 0.4901912808418274,
    "eval_runtime": 13.0776,
    "eval_samples": 1088,
    "eval_samples_per_second": 83.196,
    "eval_steps_per_second": 2.6
}