SLM_vs_LLM_experiments
/
max_seq_length_128_experiments
/google_t5
/t5_base_twitter
/eval_results.json
{
    "epoch": 3.0,
    "eval_accuracy": 0.765625,
    "eval_f1_macro": 0.7266124240384777,
    "eval_f1_micro": 0.765625,
    "eval_loss": 0.49125435948371887,
    "eval_runtime": 2.6918,
    "eval_samples": 1088,
    "eval_samples_per_second": 404.194,
    "eval_steps_per_second": 12.631
}