{
"best_metric": 0.2509765625,
"best_model_checkpoint": "./models/2023-03-16_1edc4008-e54e/checkpoint-2534",
"epoch": 3.0,
"global_step": 7602,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 1.9997369113391214e-05,
"loss": 9.8984,
"step": 1
},
{
"epoch": 1.0,
"learning_rate": 1.3333333333333333e-05,
"loss": 0.2605,
"step": 2534
},
{
"epoch": 1.0,
"eval_loss": 0.2509765625,
"eval_runtime": 191.3685,
"eval_samples_per_second": 23.536,
"eval_steps_per_second": 2.942,
"step": 2534
},
{
"epoch": 2.0,
"learning_rate": 6.666666666666667e-06,
"loss": 0.1598,
"step": 5068
},
{
"epoch": 2.0,
"eval_loss": 0.258544921875,
"eval_runtime": 159.499,
"eval_samples_per_second": 28.238,
"eval_steps_per_second": 3.53,
"step": 5068
},
{
"epoch": 3.0,
"learning_rate": 0.0,
"loss": 0.0753,
"step": 7602
},
{
"epoch": 3.0,
"eval_loss": 0.3056640625,
"eval_runtime": 159.3863,
"eval_samples_per_second": 28.258,
"eval_steps_per_second": 3.532,
"step": 7602
}
],
"max_steps": 7602,
"num_train_epochs": 3,
"total_flos": 2.1525111523686482e+18,
"trial_name": null,
"trial_params": null
}