{
"epoch": 3.0,
"eval_gen_len": 46.715,
"eval_loss": 0.0010262294672429562,
"eval_rouge1": 99.8958,
"eval_rouge2": 99.8696,
"eval_rougeL": 99.8958,
"eval_rougeLsum": 99.8958,
"eval_runtime": 101.1251,
"eval_samples": 200,
"eval_samples_per_second": 1.978,
"eval_steps_per_second": 0.494,
"predict_gen_len": 46.705,
"predict_loss": 0.0036411203909665346,
"predict_rouge1": 99.6781,
"predict_rouge2": 99.5066,
"predict_rougeL": 99.6729,
"predict_rougeLsum": 99.6874,
"predict_runtime": 101.703,
"predict_samples": 200,
"predict_samples_per_second": 1.967,
"predict_steps_per_second": 0.492,
"train_loss": 0.13440267986721463,
"train_runtime": 578.2653,
"train_samples": 600,
"train_samples_per_second": 3.113,
"train_steps_per_second": 0.778
}