{
"best_metric": 0.68423343,
"best_model_checkpoint": "/mnt/cachenew/yangzekang/wsdm_lym/swift_wsdm/output/solar-10-7b-instruct-v1/v33-20240210-002918/checkpoint-1700",
"epoch": 1.9144144144144144,
"eval_steps": 1700,
"global_step": 1700,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"acc": 0.67516661,
"epoch": 0.0,
"learning_rate": 9.345794392523364e-07,
"loss": 1.46797299,
"step": 1
},
{
"acc": 0.75014827,
"epoch": 0.11,
"learning_rate": 9.158878504672898e-05,
"loss": 0.95607149,
"step": 100
},
{
"acc": 0.78095032,
"epoch": 0.23,
"learning_rate": 9.735849056603774e-05,
"loss": 0.77662766,
"step": 200
},
{
"acc": 0.78842911,
"epoch": 0.34,
"learning_rate": 9.4455732946299e-05,
"loss": 0.74797142,
"step": 300
},
{
"acc": 0.79210396,
"epoch": 0.45,
"learning_rate": 9.155297532656024e-05,
"loss": 0.73674324,
"step": 400
},
{
"acc": 0.79180176,
"epoch": 0.56,
"learning_rate": 8.865021770682148e-05,
"loss": 0.73681076,
"step": 500
},
{
"acc": 0.79079681,
"epoch": 0.68,
"learning_rate": 8.574746008708273e-05,
"loss": 0.7331163,
"step": 600
},
{
"acc": 0.79394348,
"epoch": 0.79,
"learning_rate": 8.284470246734399e-05,
"loss": 0.72321671,
"step": 700
},
{
"acc": 0.79240639,
"epoch": 0.9,
"learning_rate": 7.994194484760524e-05,
"loss": 0.72419327,
"step": 800
},
{
"acc": 0.79329529,
"epoch": 1.01,
"learning_rate": 7.703918722786648e-05,
"loss": 0.72283455,
"step": 900
},
{
"acc": 0.8133493,
"epoch": 1.13,
"learning_rate": 7.413642960812773e-05,
"loss": 0.63324795,
"step": 1000
},
{
"acc": 0.81173607,
"epoch": 1.24,
"learning_rate": 7.123367198838897e-05,
"loss": 0.63811573,
"step": 1100
},
{
"acc": 0.81487778,
"epoch": 1.35,
"learning_rate": 6.833091436865022e-05,
"loss": 0.62915279,
"step": 1200
},
{
"acc": 0.81486343,
"epoch": 1.46,
"learning_rate": 6.542815674891147e-05,
"loss": 0.62901955,
"step": 1300
},
{
"acc": 0.81607597,
"epoch": 1.58,
"learning_rate": 6.252539912917271e-05,
"loss": 0.62480858,
"step": 1400
},
{
"acc": 0.81506241,
"epoch": 1.69,
"learning_rate": 5.965166908563136e-05,
"loss": 0.62783173,
"step": 1500
},
{
"acc": 0.81640251,
"epoch": 1.8,
"learning_rate": 5.6748911465892595e-05,
"loss": 0.62696617,
"step": 1600
},
{
"acc": 0.8188623,
"epoch": 1.91,
"learning_rate": 5.384615384615385e-05,
"loss": 0.61394943,
"step": 1700
},
{
"epoch": 1.91,
"eval_acc": 0.8126138974724665,
"eval_loss": 0.6842334270477295,
"eval_runtime": 19.504,
"eval_samples_per_second": 7.383,
"eval_steps_per_second": 3.692,
"step": 1700
}
],
"logging_steps": 100,
"max_steps": 3552,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 1700,
"total_flos": 2.357705064388231e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}