{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 3643,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02744990392533626,
"grad_norm": 1.856189489364624,
"learning_rate": 4.865495470765852e-05,
"loss": 1.7566,
"step": 100
},
{
"epoch": 0.05489980785067252,
"grad_norm": 1.998369574546814,
"learning_rate": 4.728245951139171e-05,
"loss": 1.7366,
"step": 200
},
{
"epoch": 0.08234971177600878,
"grad_norm": 1.8302088975906372,
"learning_rate": 4.59099643151249e-05,
"loss": 1.7086,
"step": 300
},
{
"epoch": 0.10979961570134504,
"grad_norm": 1.9259424209594727,
"learning_rate": 4.453746911885809e-05,
"loss": 1.688,
"step": 400
},
{
"epoch": 0.1372495196266813,
"grad_norm": 1.6357027292251587,
"learning_rate": 4.316497392259127e-05,
"loss": 1.7272,
"step": 500
},
{
"epoch": 0.16469942355201755,
"grad_norm": 1.7630338668823242,
"learning_rate": 4.179247872632446e-05,
"loss": 1.7032,
"step": 600
},
{
"epoch": 0.19214932747735383,
"grad_norm": 2.0980162620544434,
"learning_rate": 4.041998353005765e-05,
"loss": 1.7523,
"step": 700
},
{
"epoch": 0.21959923140269008,
"grad_norm": 1.4029771089553833,
"learning_rate": 3.904748833379083e-05,
"loss": 1.6537,
"step": 800
},
{
"epoch": 0.24704913532802636,
"grad_norm": 1.5694669485092163,
"learning_rate": 3.767499313752402e-05,
"loss": 1.7099,
"step": 900
},
{
"epoch": 0.2744990392533626,
"grad_norm": 1.5211464166641235,
"learning_rate": 3.630249794125721e-05,
"loss": 1.688,
"step": 1000
},
{
"epoch": 0.30194894317869886,
"grad_norm": 1.527065396308899,
"learning_rate": 3.493000274499039e-05,
"loss": 1.6995,
"step": 1100
},
{
"epoch": 0.3293988471040351,
"grad_norm": 1.5144612789154053,
"learning_rate": 3.355750754872358e-05,
"loss": 1.6237,
"step": 1200
},
{
"epoch": 0.3568487510293714,
"grad_norm": 1.7216092348098755,
"learning_rate": 3.218501235245677e-05,
"loss": 1.6544,
"step": 1300
},
{
"epoch": 0.38429865495470766,
"grad_norm": 1.5754574537277222,
"learning_rate": 3.081251715618996e-05,
"loss": 1.6567,
"step": 1400
},
{
"epoch": 0.4117485588800439,
"grad_norm": 1.5684128999710083,
"learning_rate": 2.944002195992314e-05,
"loss": 1.6806,
"step": 1500
},
{
"epoch": 0.43919846280538016,
"grad_norm": 1.3534952402114868,
"learning_rate": 2.8067526763656326e-05,
"loss": 1.5953,
"step": 1600
},
{
"epoch": 0.46664836673071647,
"grad_norm": 1.4534939527511597,
"learning_rate": 2.669503156738952e-05,
"loss": 1.6146,
"step": 1700
},
{
"epoch": 0.4940982706560527,
"grad_norm": 1.3850529193878174,
"learning_rate": 2.53225363711227e-05,
"loss": 1.5718,
"step": 1800
},
{
"epoch": 0.5215481745813889,
"grad_norm": 1.536191701889038,
"learning_rate": 2.395004117485589e-05,
"loss": 1.6358,
"step": 1900
},
{
"epoch": 0.5489980785067252,
"grad_norm": 1.4949287176132202,
"learning_rate": 2.2577545978589075e-05,
"loss": 1.6173,
"step": 2000
},
{
"epoch": 0.5764479824320615,
"grad_norm": 1.489922046661377,
"learning_rate": 2.1205050782322264e-05,
"loss": 1.6291,
"step": 2100
},
{
"epoch": 0.6038978863573977,
"grad_norm": 2.090135097503662,
"learning_rate": 1.983255558605545e-05,
"loss": 1.6579,
"step": 2200
},
{
"epoch": 0.631347790282734,
"grad_norm": 1.798261284828186,
"learning_rate": 1.8460060389788635e-05,
"loss": 1.7008,
"step": 2300
},
{
"epoch": 0.6587976942080702,
"grad_norm": 1.43966805934906,
"learning_rate": 1.7087565193521824e-05,
"loss": 1.5593,
"step": 2400
},
{
"epoch": 0.6862475981334065,
"grad_norm": 1.5113532543182373,
"learning_rate": 1.571506999725501e-05,
"loss": 1.576,
"step": 2500
},
{
"epoch": 0.7136975020587428,
"grad_norm": 1.5978457927703857,
"learning_rate": 1.4342574800988198e-05,
"loss": 1.6075,
"step": 2600
},
{
"epoch": 0.741147405984079,
"grad_norm": 1.7422983646392822,
"learning_rate": 1.2970079604721382e-05,
"loss": 1.634,
"step": 2700
},
{
"epoch": 0.7685973099094153,
"grad_norm": 1.7618128061294556,
"learning_rate": 1.1597584408454571e-05,
"loss": 1.6333,
"step": 2800
},
{
"epoch": 0.7960472138347516,
"grad_norm": 1.4939252138137817,
"learning_rate": 1.0225089212187758e-05,
"loss": 1.5678,
"step": 2900
},
{
"epoch": 0.8234971177600878,
"grad_norm": 1.3491543531417847,
"learning_rate": 8.852594015920946e-06,
"loss": 1.5577,
"step": 3000
},
{
"epoch": 0.8509470216854241,
"grad_norm": 1.800729751586914,
"learning_rate": 7.480098819654131e-06,
"loss": 1.5562,
"step": 3100
},
{
"epoch": 0.8783969256107603,
"grad_norm": 1.4192384481430054,
"learning_rate": 6.107603623387318e-06,
"loss": 1.6218,
"step": 3200
},
{
"epoch": 0.9058468295360966,
"grad_norm": 1.487561583518982,
"learning_rate": 4.735108427120506e-06,
"loss": 1.6313,
"step": 3300
},
{
"epoch": 0.9332967334614329,
"grad_norm": 1.393309235572815,
"learning_rate": 3.3626132308536925e-06,
"loss": 1.5928,
"step": 3400
},
{
"epoch": 0.9607466373867691,
"grad_norm": 1.397757649421692,
"learning_rate": 1.990118034586879e-06,
"loss": 1.568,
"step": 3500
},
{
"epoch": 0.9881965413121054,
"grad_norm": 1.2685039043426514,
"learning_rate": 6.176228383200659e-07,
"loss": 1.5983,
"step": 3600
}
],
"logging_steps": 100,
"max_steps": 3643,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 3643,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.1045534257958093e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}