Tinyllama-1.5B-Cinder-Test-4 / trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.056700482783555585,
  "eval_steps": 500,
  "global_step": 2666,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "grad_norm": 0.9096835851669312,
      "learning_rate": 4.989366000978328e-05,
      "loss": 0.5573,
      "step": 100
    },
    {
      "epoch": 0.0,
      "grad_norm": 0.8342915177345276,
      "learning_rate": 4.978732001956656e-05,
      "loss": 0.5416,
      "step": 200
    },
    {
      "epoch": 0.01,
      "grad_norm": 1.401437759399414,
      "learning_rate": 4.968098002934984e-05,
      "loss": 0.5475,
      "step": 300
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.7456225156784058,
      "learning_rate": 4.957464003913312e-05,
      "loss": 0.5302,
      "step": 400
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.966705858707428,
      "learning_rate": 4.94683000489164e-05,
      "loss": 0.5257,
      "step": 500
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.8829948902130127,
      "learning_rate": 4.936302345860184e-05,
      "loss": 0.5185,
      "step": 600
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.8967320322990417,
      "learning_rate": 4.925668346838512e-05,
      "loss": 0.5195,
      "step": 700
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.8852934241294861,
      "learning_rate": 4.9151406878070574e-05,
      "loss": 0.5117,
      "step": 800
    },
    {
      "epoch": 0.02,
      "grad_norm": 1.0393266677856445,
      "learning_rate": 4.9045066887853846e-05,
      "loss": 0.5027,
      "step": 900
    },
    {
      "epoch": 0.02,
      "grad_norm": 1.016802191734314,
      "learning_rate": 4.8938726897637125e-05,
      "loss": 0.5099,
      "step": 1000
    },
    {
      "epoch": 0.02,
      "grad_norm": 1.0666687488555908,
      "learning_rate": 4.8832386907420405e-05,
      "loss": 0.5129,
      "step": 1100
    },
    {
      "epoch": 0.03,
      "grad_norm": 1.0762836933135986,
      "learning_rate": 4.872604691720369e-05,
      "loss": 0.5277,
      "step": 1200
    },
    {
      "epoch": 0.03,
      "grad_norm": 0.8559880256652832,
      "learning_rate": 4.861970692698696e-05,
      "loss": 0.4987,
      "step": 1300
    },
    {
      "epoch": 0.03,
      "grad_norm": 1.0834448337554932,
      "learning_rate": 4.851336693677024e-05,
      "loss": 0.5171,
      "step": 1400
    },
    {
      "epoch": 0.03,
      "grad_norm": 0.9118117690086365,
      "learning_rate": 4.840702694655352e-05,
      "loss": 0.5073,
      "step": 1500
    },
    {
      "epoch": 0.03,
      "grad_norm": 1.029111623764038,
      "learning_rate": 4.830068695633681e-05,
      "loss": 0.5085,
      "step": 1600
    },
    {
      "epoch": 0.04,
      "grad_norm": 0.9440446496009827,
      "learning_rate": 4.819434696612008e-05,
      "loss": 0.4978,
      "step": 1700
    },
    {
      "epoch": 0.04,
      "grad_norm": 0.8061498403549194,
      "learning_rate": 4.808800697590336e-05,
      "loss": 0.4859,
      "step": 1800
    },
    {
      "epoch": 0.04,
      "grad_norm": 1.1587491035461426,
      "learning_rate": 4.798166698568664e-05,
      "loss": 0.5029,
      "step": 1900
    },
    {
      "epoch": 0.04,
      "grad_norm": 0.8422715067863464,
      "learning_rate": 4.787532699546992e-05,
      "loss": 0.507,
      "step": 2000
    },
    {
      "epoch": 0.04,
      "grad_norm": 1.0634592771530151,
      "learning_rate": 4.77689870052532e-05,
      "loss": 0.4967,
      "step": 2100
    },
    {
      "epoch": 0.05,
      "grad_norm": 1.1594743728637695,
      "learning_rate": 4.7662647015036477e-05,
      "loss": 0.4993,
      "step": 2200
    },
    {
      "epoch": 0.05,
      "grad_norm": 1.0729972124099731,
      "learning_rate": 4.7556307024819756e-05,
      "loss": 0.496,
      "step": 2300
    },
    {
      "epoch": 0.05,
      "grad_norm": 1.0069080591201782,
      "learning_rate": 4.7449967034603035e-05,
      "loss": 0.5016,
      "step": 2400
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.9722279906272888,
      "learning_rate": 4.7343627044386314e-05,
      "loss": 0.4883,
      "step": 2500
    },
    {
      "epoch": 0.06,
      "grad_norm": 0.760681688785553,
      "learning_rate": 4.7237287054169594e-05,
      "loss": 0.4899,
      "step": 2600
    }
  ],
  "logging_steps": 100,
  "max_steps": 47019,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 1333,
  "total_flos": 7.500219014216417e+17,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}
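
For context, the learning_rate values in log_history trace a linear decay from 5e-05 toward zero at max_steps: lr(step) = 5e-05 * (1 - step / 47019) reproduces the value logged at step 100 (4.989366e-05) exactly. Below is a minimal Python sketch that loads this file and prints each logged entry next to that linear-decay estimate. The local path and the 5e-05 base learning rate are assumptions inferred from the logged values; neither is stored in the file itself.

import json

# A minimal sketch: load the trainer state and compare each logged
# learning rate against a linear decay from the inferred base LR.
with open("trainer_state.json") as f:   # assumed local path
    state = json.load(f)

max_steps = state["max_steps"]          # 47019
base_lr = 5e-05                         # assumption: inferred from the first logged values

for entry in state["log_history"]:
    step = entry["step"]
    expected = base_lr * (1 - step / max_steps)
    print(f"step {step:5d}  loss {entry['loss']:.4f}  "
          f"lr {entry['learning_rate']:.6e}  linear-decay est. {expected:.6e}")

The logged rates match the linear estimate exactly through step 500 and sit roughly one scheduler step above it from step 600 onward, a small offset consistent with a skipped optimizer step or a resumed run rather than a different schedule.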