{
  "best_metric": 0.29077759,
  "best_model_checkpoint": "/home/patrickbarker/output/qwen2-vl-7b-instruct/v1-20241219-214811/checkpoint-200",
  "epoch": 4.449438202247191,
  "eval_steps": 200,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "acc": 0.76875001,
      "epoch": 0.02247191011235955,
      "grad_norm": 29.470391176777557,
      "learning_rate": 0.0,
      "loss": 1.35673797,
      "memory(GiB)": 54.56,
      "step": 1,
      "train_speed(iter/s)": 0.02296
    },
    {
      "acc": 0.77820516,
      "epoch": 0.11235955056179775,
      "grad_norm": 16.84515659540099,
      "learning_rate": 6.711877414712397e-06,
      "loss": 1.3136692,
      "memory(GiB)": 73.74,
      "step": 5,
      "train_speed(iter/s)": 0.030625
    },
    {
      "acc": 0.82758846,
      "epoch": 0.2247191011235955,
      "grad_norm": 16.512337769405548,
      "learning_rate": 9.602525677891276e-06,
      "loss": 0.96648178,
      "memory(GiB)": 73.74,
      "step": 10,
      "train_speed(iter/s)": 0.031938
    },
    {
      "acc": 0.83512478,
      "epoch": 0.33707865168539325,
      "grad_norm": 9.246001103598038,
      "learning_rate": 9.990965733615236e-06,
      "loss": 0.86084976,
      "memory(GiB)": 73.74,
      "step": 15,
      "train_speed(iter/s)": 0.032388
    },
    {
      "acc": 0.86416492,
      "epoch": 0.449438202247191,
      "grad_norm": 12.349159321784564,
      "learning_rate": 9.954319977802235e-06,
      "loss": 0.74885101,
      "memory(GiB)": 73.74,
      "step": 20,
      "train_speed(iter/s)": 0.032624
    },
    {
      "acc": 0.86872597,
      "epoch": 0.5617977528089888,
      "grad_norm": 5.739918942992134,
      "learning_rate": 9.889704834236431e-06,
      "loss": 0.67363873,
      "memory(GiB)": 73.74,
      "step": 25,
      "train_speed(iter/s)": 0.03277
    },
    {
      "acc": 0.87583218,
      "epoch": 0.6741573033707865,
      "grad_norm": 4.32721377918037,
      "learning_rate": 9.797485121585682e-06,
      "loss": 0.74702172,
      "memory(GiB)": 73.74,
      "step": 30,
      "train_speed(iter/s)": 0.032865
    },
    {
      "acc": 0.9008894,
      "epoch": 0.7865168539325843,
      "grad_norm": 4.562714773064729,
      "learning_rate": 9.678181514584457e-06,
      "loss": 0.54894347,
      "memory(GiB)": 73.74,
      "step": 35,
      "train_speed(iter/s)": 0.032932
    },
    {
      "acc": 0.90051165,
      "epoch": 0.898876404494382,
      "grad_norm": 13.503139801961135,
      "learning_rate": 9.532467604291679e-06,
      "loss": 0.62093091,
      "memory(GiB)": 73.74,
      "step": 40,
      "train_speed(iter/s)": 0.032984
    },
    {
      "acc": 0.8186779,
      "epoch": 1.0,
      "grad_norm": 6.94765792847355,
      "learning_rate": 9.397429019156841e-06,
      "loss": 0.47887707,
      "memory(GiB)": 73.74,
      "step": 45,
      "train_speed(iter/s)": 0.03339
    },
    {
      "acc": 0.90204411,
      "epoch": 1.1123595505617978,
      "grad_norm": 5.053805856749545,
      "learning_rate": 9.206347037389492e-06,
      "loss": 0.55135803,
      "memory(GiB)": 73.74,
      "step": 50,
      "train_speed(iter/s)": 0.033362
    },
    {
      "acc": 0.90205927,
      "epoch": 1.2247191011235956,
      "grad_norm": 4.031404833953091,
      "learning_rate": 8.991518741233478e-06,
      "loss": 0.53315639,
      "memory(GiB)": 73.74,
      "step": 55,
      "train_speed(iter/s)": 0.033361
    },
    {
      "acc": 0.89569025,
      "epoch": 1.3370786516853932,
      "grad_norm": 4.80807450740523,
      "learning_rate": 8.754157056459702e-06,
      "loss": 0.58139644,
      "memory(GiB)": 73.74,
      "step": 60,
      "train_speed(iter/s)": 0.033359
    },
    {
      "acc": 0.89294872,
      "epoch": 1.449438202247191,
      "grad_norm": 2.4132119888812698,
      "learning_rate": 8.495602132897754e-06,
      "loss": 0.59313574,
      "memory(GiB)": 73.74,
      "step": 65,
      "train_speed(iter/s)": 0.033356
    },
    {
      "acc": 0.89750004,
      "epoch": 1.5617977528089888,
      "grad_norm": 3.83237148706138,
      "learning_rate": 8.21731377791749e-06,
      "loss": 0.53816686,
      "memory(GiB)": 73.74,
      "step": 70,
      "train_speed(iter/s)": 0.033357
    },
    {
      "acc": 0.90059528,
      "epoch": 1.6741573033707864,
      "grad_norm": 4.066079361707885,
      "learning_rate": 7.920863214321187e-06,
      "loss": 0.54892931,
      "memory(GiB)": 73.74,
      "step": 75,
      "train_speed(iter/s)": 0.033355
    },
    {
      "acc": 0.908568,
      "epoch": 1.7865168539325844,
      "grad_norm": 4.4036447401323455,
      "learning_rate": 7.607924209181516e-06,
      "loss": 0.50489616,
      "memory(GiB)": 73.74,
      "step": 80,
      "train_speed(iter/s)": 0.033354
    },
    {
      "acc": 0.91108637,
      "epoch": 1.898876404494382,
      "grad_norm": 2.303374188105592,
      "learning_rate": 7.280263623712031e-06,
      "loss": 0.46924047,
      "memory(GiB)": 73.74,
      "step": 85,
      "train_speed(iter/s)": 0.033354
    },
    {
      "acc": 0.81755037,
      "epoch": 2.0,
      "grad_norm": 3.6316429893731392,
      "learning_rate": 6.93973143752592e-06,
      "loss": 0.45278177,
      "memory(GiB)": 73.74,
      "step": 90,
      "train_speed(iter/s)": 0.033536
    },
    {
      "acc": 0.9171875,
      "epoch": 2.1123595505617976,
      "grad_norm": 2.43556110559636,
      "learning_rate": 6.588250303606212e-06,
      "loss": 0.46200504,
      "memory(GiB)": 73.74,
      "step": 95,
      "train_speed(iter/s)": 0.033514
    },
    {
      "acc": 0.91527672,
      "epoch": 2.2247191011235956,
      "grad_norm": 3.160658304441712,
      "learning_rate": 6.2278046929604265e-06,
      "loss": 0.46762981,
      "memory(GiB)": 73.74,
      "step": 100,
      "train_speed(iter/s)": 0.033505
    },
    {
      "acc": 0.91525297,
      "epoch": 2.337078651685393,
      "grad_norm": 3.4672725798628012,
      "learning_rate": 5.860429690249112e-06,
      "loss": 0.46896749,
      "memory(GiB)": 73.74,
      "step": 105,
      "train_speed(iter/s)": 0.033496
    },
    {
      "acc": 0.91085396,
      "epoch": 2.449438202247191,
      "grad_norm": 4.264836814343864,
      "learning_rate": 5.488199503648495e-06,
      "loss": 0.49584384,
      "memory(GiB)": 73.74,
      "step": 110,
      "train_speed(iter/s)": 0.033488
    },
    {
      "acc": 0.91369057,
      "epoch": 2.561797752808989,
      "grad_norm": 1.8519201548674813,
      "learning_rate": 5.113215753820809e-06,
      "loss": 0.45219369,
      "memory(GiB)": 74.75,
      "step": 115,
      "train_speed(iter/s)": 0.033481
    },
    {
      "acc": 0.9125,
      "epoch": 2.6741573033707864,
      "grad_norm": 4.899506906273797,
      "learning_rate": 4.737595608113059e-06,
      "loss": 0.4595274,
      "memory(GiB)": 74.75,
      "step": 120,
      "train_speed(iter/s)": 0.033476
    },
    {
      "acc": 0.90626717,
      "epoch": 2.7865168539325844,
      "grad_norm": 2.3927675084287645,
      "learning_rate": 4.363459826978817e-06,
      "loss": 0.49834228,
      "memory(GiB)": 74.75,
      "step": 125,
      "train_speed(iter/s)": 0.033471
    },
    {
      "acc": 0.92366066,
      "epoch": 2.898876404494382,
      "grad_norm": 2.463284179520905,
      "learning_rate": 3.9929207901132785e-06,
      "loss": 0.39038134,
      "memory(GiB)": 74.75,
      "step": 130,
      "train_speed(iter/s)": 0.033466
    },
    {
      "acc": 0.81810102,
      "epoch": 3.0,
      "grad_norm": 2.4652024602525677,
      "learning_rate": 3.700486073914168e-06,
      "loss": 0.40796843,
      "memory(GiB)": 74.75,
      "step": 135,
      "train_speed(iter/s)": 0.033586
    },
    {
      "acc": 0.91892242,
      "epoch": 3.1123595505617976,
      "grad_norm": 2.7719335270024406,
      "learning_rate": 3.3416720076404165e-06,
      "loss": 0.40879712,
      "memory(GiB)": 74.75,
      "step": 140,
      "train_speed(iter/s)": 0.033569
    },
    {
      "acc": 0.9270833,
      "epoch": 3.2247191011235956,
      "grad_norm": 1.9611434725282086,
      "learning_rate": 2.99222372444748e-06,
      "loss": 0.41382871,
      "memory(GiB)": 74.75,
      "step": 145,
      "train_speed(iter/s)": 0.033562
    },
    {
      "acc": 0.92385416,
      "epoch": 3.337078651685393,
      "grad_norm": 2.8780597897464637,
      "learning_rate": 2.6541142178183634e-06,
      "loss": 0.39298267,
      "memory(GiB)": 74.75,
      "step": 150,
      "train_speed(iter/s)": 0.033551
    },
    {
      "acc": 0.9191987,
      "epoch": 3.449438202247191,
      "grad_norm": 3.7005032328018257,
      "learning_rate": 2.329252462228559e-06,
      "loss": 0.44139132,
      "memory(GiB)": 74.75,
      "step": 155,
      "train_speed(iter/s)": 0.033545
    },
    {
      "acc": 0.92789841,
      "epoch": 3.561797752808989,
      "grad_norm": 3.204039430908735,
      "learning_rate": 2.019472635029862e-06,
      "loss": 0.36037457,
      "memory(GiB)": 74.75,
      "step": 160,
      "train_speed(iter/s)": 0.033535
    },
    {
      "acc": 0.92560101,
      "epoch": 3.6741573033707864,
      "grad_norm": 2.2869606081065506,
      "learning_rate": 1.7265237606405478e-06,
      "loss": 0.39443879,
      "memory(GiB)": 74.75,
      "step": 165,
      "train_speed(iter/s)": 0.03353
    },
    {
      "acc": 0.9338542,
      "epoch": 3.7865168539325844,
      "grad_norm": 2.4065886112138943,
      "learning_rate": 1.4520598355110829e-06,
      "loss": 0.36085033,
      "memory(GiB)": 74.75,
      "step": 170,
      "train_speed(iter/s)": 0.033524
    },
    {
      "acc": 0.92566967,
      "epoch": 3.898876404494382,
      "grad_norm": 2.082750093607268,
      "learning_rate": 1.1976304896200528e-06,
      "loss": 0.38640497,
      "memory(GiB)": 74.75,
      "step": 175,
      "train_speed(iter/s)": 0.033518
    },
    {
      "acc": 0.8370594,
      "epoch": 4.0,
      "grad_norm": 2.5249629065672883,
      "learning_rate": 9.64672237225702e-07,
      "loss": 0.32685859,
      "memory(GiB)": 74.75,
      "step": 180,
      "train_speed(iter/s)": 0.033605
    },
    {
      "acc": 0.9390625,
      "epoch": 4.112359550561798,
      "grad_norm": 1.8419541872377712,
      "learning_rate": 7.545003662716096e-07,
      "loss": 0.32183304,
      "memory(GiB)": 74.75,
      "step": 185,
      "train_speed(iter/s)": 0.033592
    },
    {
      "acc": 0.93617792,
      "epoch": 4.224719101123595,
      "grad_norm": 2.5663433640548448,
      "learning_rate": 5.683015122390326e-07,
      "loss": 0.32805934,
      "memory(GiB)": 74.75,
      "step": 190,
      "train_speed(iter/s)": 0.033585
    },
    {
      "acc": 0.95014877,
      "epoch": 4.337078651685394,
      "grad_norm": 2.75420957077066,
      "learning_rate": 4.071269583742181e-07,
      "loss": 0.29385498,
      "memory(GiB)": 74.75,
      "step": 195,
      "train_speed(iter/s)": 0.033578
    },
    {
      "acc": 0.92983627,
      "epoch": 4.449438202247191,
      "grad_norm": 3.2025724539840863,
      "learning_rate": 2.718867001176766e-07,
      "loss": 0.35593481,
      "memory(GiB)": 74.75,
      "step": 200,
      "train_speed(iter/s)": 0.033572
    },
    {
      "epoch": 4.449438202247191,
      "eval_acc": 0.8987898789878987,
      "eval_loss": 0.2907775938510895,
      "eval_runtime": 23.6124,
      "eval_samples_per_second": 1.567,
      "eval_steps_per_second": 0.212,
      "step": 200
    }
  ],
  "logging_steps": 5,
  "max_steps": 220,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 200,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 476842183622656.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}