SLM_vs_LLM_experiments/max_seq_length_128_experiments/google_t5/t5_small_twitter/train_results.json
{
    "epoch": 3.0,
    "train_loss": 0.44335933526357013,
    "train_runtime": 92.5,
    "train_samples": 8700,
    "train_samples_per_second": 282.162,
    "train_steps_per_second": 8.822
}