SLM_vs_LLM_experiments/max_seq_length_128_experiments/FacebookAI/roberta_base_amazon/all_results.json
{
    "epoch": 3.0,
    "eval_accuracy": 0.80566534914361,
    "eval_f1_macro": 0.7451626163175262,
    "eval_f1_micro": 0.80566534914361,
    "eval_loss": 0.6847230792045593,
    "eval_runtime": 1.4571,
    "eval_samples": 1518,
    "eval_samples_per_second": 1041.828,
    "eval_steps_per_second": 16.472,
    "test_accuracy": 0.8326745718050066,
    "test_f1_macro": 0.7741381548752201,
    "test_f1_micro": 0.8326745718050066,
    "test_loss": 0.6433278322219849,
    "test_runtime": 1.4562,
    "test_samples_per_second": 1042.471,
    "test_steps_per_second": 16.482,
    "train_loss": 0.9787228534096166,
    "train_runtime": 143.678,
    "train_samples": 12144,
    "train_samples_per_second": 253.567,
    "train_steps_per_second": 3.967
}