{
"best_global_step": 121,
"best_metric": 0.4985087513923645,
"best_model_checkpoint": "/workspace/gemma4-26b-securecode/checkpoint-121",
"epoch": 1.0,
"eval_steps": 500,
"global_step": 121,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"entropy": 1.0907821020111441,
"epoch": 0.0827300930713547,
"grad_norm": 20.875,
"learning_rate": 1.8e-05,
"loss": 80.26775512695312,
"mean_token_accuracy": 0.4542873948812485,
"num_tokens": 326185.0,
"step": 10
},
{
"entropy": 0.8271314173936843,
"epoch": 0.1654601861427094,
"grad_norm": 8.75,
"learning_rate": 3.8e-05,
"loss": 58.08096923828125,
"mean_token_accuracy": 0.5611657274886965,
"num_tokens": 653865.0,
"step": 20
},
{
"entropy": 0.4787554959766567,
"epoch": 0.2481902792140641,
"grad_norm": 1.7109375,
"learning_rate": 5.8e-05,
"loss": 25.493240356445312,
"mean_token_accuracy": 0.7378443486988544,
"num_tokens": 981337.0,
"step": 30
},
{
"entropy": 0.7855595085769892,
"epoch": 0.3309203722854188,
"grad_norm": 0.8671875,
"learning_rate": 7.800000000000001e-05,
"loss": 14.629072570800782,
"mean_token_accuracy": 0.7917733617126942,
"num_tokens": 1308584.0,
"step": 40
},
{
"entropy": 0.7569877350702882,
"epoch": 0.4136504653567735,
"grad_norm": 2.109375,
"learning_rate": 9.8e-05,
"loss": 12.609142303466797,
"mean_token_accuracy": 0.8013272784650326,
"num_tokens": 1635098.0,
"step": 50
},
{
"entropy": 0.6735223602503538,
"epoch": 0.4963805584281282,
"grad_norm": 16.875,
"learning_rate": 0.000118,
"loss": 10.704925537109375,
"mean_token_accuracy": 0.8209844313561916,
"num_tokens": 1962302.0,
"step": 60
},
{
"entropy": 0.6005677949637175,
"epoch": 0.5791106514994829,
"grad_norm": 1.546875,
"learning_rate": 0.000138,
"loss": 9.783185577392578,
"mean_token_accuracy": 0.8308866504579783,
"num_tokens": 2289982.0,
"step": 70
},
{
"entropy": 0.5877057909965515,
"epoch": 0.6618407445708376,
"grad_norm": 11.25,
"learning_rate": 0.00015800000000000002,
"loss": 9.298844909667968,
"mean_token_accuracy": 0.8359990835189819,
"num_tokens": 2616786.0,
"step": 80
},
{
"entropy": 0.5447238819673658,
"epoch": 0.7445708376421923,
"grad_norm": 1.2890625,
"learning_rate": 0.00017800000000000002,
"loss": 8.777264404296876,
"mean_token_accuracy": 0.8440194871276617,
"num_tokens": 2941975.0,
"step": 90
},
{
"entropy": 0.5323287105187774,
"epoch": 0.827300930713547,
"grad_norm": 0.70703125,
"learning_rate": 0.00019800000000000002,
"loss": 8.489185333251953,
"mean_token_accuracy": 0.8486687760800123,
"num_tokens": 3269655.0,
"step": 100
},
{
"entropy": 0.4949887519702315,
"epoch": 0.9100310237849017,
"grad_norm": 0.439453125,
"learning_rate": 0.00019942266891397815,
"loss": 8.192723083496094,
"mean_token_accuracy": 0.8528529018163681,
"num_tokens": 3595193.0,
"step": 110
},
{
"entropy": 0.4980895221233368,
"epoch": 0.9927611168562565,
"grad_norm": 0.921875,
"learning_rate": 0.00019743551343638324,
"loss": 7.908926391601563,
"mean_token_accuracy": 0.8567473825067282,
"num_tokens": 3922475.0,
"step": 120
},
{
"epoch": 1.0,
"eval_entropy": 0.5629051625728607,
"eval_loss": 0.4985087513923645,
"eval_mean_token_accuracy": 0.8571729124978531,
"eval_num_tokens": 3949618.0,
"eval_runtime": 122.3216,
"eval_samples_per_second": 1.758,
"eval_steps_per_second": 1.758,
"step": 121
}
],
"logging_steps": 10,
"max_steps": 363,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5.944883327916494e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}