{
  "best_metric": 0.69346523,
  "best_model_checkpoint": "/mnt/cachenew/yangzekang/wsdm_lym/swift_wsdm/output/solar-10-7b-instruct-v1/v8-20240205-114459/checkpoint-1700",
  "epoch": 1.9144144144144144,
  "eval_steps": 1700,
  "global_step": 1700,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "acc": 0.67516661,
      "epoch": 0.0,
      "learning_rate": 9.345794392523364e-07,
      "loss": 1.46797299,
      "step": 1
    },
    {
      "acc": 0.74959772,
      "epoch": 0.11,
      "learning_rate": 9.158878504672898e-05,
      "loss": 0.95736756,
      "step": 100
    },
    {
      "acc": 0.78039505,
      "epoch": 0.23,
      "learning_rate": 9.735849056603774e-05,
      "loss": 0.77719513,
      "step": 200
    },
    {
      "acc": 0.78730965,
      "epoch": 0.34,
      "learning_rate": 9.4455732946299e-05,
      "loss": 0.74986748,
      "step": 300
    },
    {
      "acc": 0.79136169,
      "epoch": 0.45,
      "learning_rate": 9.155297532656024e-05,
      "loss": 0.73882896,
      "step": 400
    },
    {
      "acc": 0.79132759,
      "epoch": 0.56,
      "learning_rate": 8.865021770682148e-05,
      "loss": 0.7381105,
      "step": 500
    },
    {
      "acc": 0.79090462,
      "epoch": 0.68,
      "learning_rate": 8.574746008708273e-05,
      "loss": 0.73391434,
      "step": 600
    },
    {
      "acc": 0.79388229,
      "epoch": 0.79,
      "learning_rate": 8.284470246734399e-05,
      "loss": 0.72463707,
      "step": 700
    },
    {
      "acc": 0.79177132,
      "epoch": 0.9,
      "learning_rate": 7.994194484760524e-05,
      "loss": 0.72666443,
      "step": 800
    },
    {
      "acc": 0.79278763,
      "epoch": 1.01,
      "learning_rate": 7.703918722786648e-05,
      "loss": 0.72437874,
      "step": 900
    },
    {
      "acc": 0.81367294,
      "epoch": 1.13,
      "learning_rate": 7.413642960812773e-05,
      "loss": 0.63475815,
      "step": 1000
    },
    {
      "acc": 0.81142548,
      "epoch": 1.24,
      "learning_rate": 7.123367198838897e-05,
      "loss": 0.63902611,
      "step": 1100
    },
    {
      "acc": 0.81469452,
      "epoch": 1.35,
      "learning_rate": 6.833091436865022e-05,
      "loss": 0.63060787,
      "step": 1200
    },
    {
      "acc": 0.81454559,
      "epoch": 1.46,
      "learning_rate": 6.542815674891147e-05,
      "loss": 0.63029087,
      "step": 1300
    },
    {
      "acc": 0.81508331,
      "epoch": 1.58,
      "learning_rate": 6.252539912917271e-05,
      "loss": 0.62843025,
      "step": 1400
    },
    {
      "acc": 0.81447418,
      "epoch": 1.69,
      "learning_rate": 5.9622641509433966e-05,
      "loss": 0.62912048,
      "step": 1500
    },
    {
      "acc": 0.81561844,
      "epoch": 1.8,
      "learning_rate": 5.671988388969521e-05,
      "loss": 0.63019512,
      "step": 1600
    },
    {
      "acc": 0.8182132,
      "epoch": 1.91,
      "learning_rate": 5.381712626995646e-05,
      "loss": 0.61591331,
      "step": 1700
    },
    {
      "epoch": 1.91,
      "eval_acc": 0.8075339535878515,
      "eval_loss": 0.6934652328491211,
      "eval_runtime": 19.4269,
      "eval_samples_per_second": 7.412,
      "eval_steps_per_second": 3.706,
      "step": 1700
    }
  ],
  "logging_steps": 100,
  "max_steps": 3552,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 1700,
  "total_flos": 2.3576867592376156e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}