{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.6021505376344085,
"eval_steps": 1000,
"global_step": 7200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0011947431302270011,
"grad_norm": 23.375,
"learning_rate": 2e-06,
"loss": 2.4074,
"step": 1
},
{
"epoch": 0.11947431302270012,
"grad_norm": 0.515625,
"learning_rate": 0.0002,
"loss": 0.7151,
"step": 100
},
{
"epoch": 0.23894862604540024,
"grad_norm": 0.369140625,
"learning_rate": 0.0004,
"loss": 0.4333,
"step": 200
},
{
"epoch": 0.35842293906810035,
"grad_norm": 0.361328125,
"learning_rate": 0.0006,
"loss": 0.4113,
"step": 300
},
{
"epoch": 0.4778972520908005,
"grad_norm": 0.244140625,
"learning_rate": 0.0008,
"loss": 0.393,
"step": 400
},
{
"epoch": 0.5973715651135006,
"grad_norm": 0.2099609375,
"learning_rate": 0.001,
"loss": 0.3857,
"step": 500
},
{
"epoch": 0.7168458781362007,
"grad_norm": 0.193359375,
"learning_rate": 0.0012,
"loss": 0.3734,
"step": 600
},
{
"epoch": 0.8363201911589009,
"grad_norm": 0.1796875,
"learning_rate": 0.0014,
"loss": 0.373,
"step": 700
},
{
"epoch": 0.955794504181601,
"grad_norm": 0.126953125,
"learning_rate": 0.0016,
"loss": 0.3578,
"step": 800
},
{
"epoch": 1.075268817204301,
"grad_norm": 0.111328125,
"learning_rate": 0.0018000000000000002,
"loss": 0.3473,
"step": 900
},
{
"epoch": 1.1947431302270013,
"grad_norm": 0.125,
"learning_rate": 0.002,
"loss": 0.3551,
"step": 1000
},
{
"epoch": 1.3142174432497014,
"grad_norm": 0.1591796875,
"learning_rate": 0.0019987165071710528,
"loss": 0.3466,
"step": 1100
},
{
"epoch": 1.4336917562724014,
"grad_norm": 0.099609375,
"learning_rate": 0.001994869323391895,
"loss": 0.3251,
"step": 1200
},
{
"epoch": 1.5531660692951015,
"grad_norm": 0.10888671875,
"learning_rate": 0.0019884683243281114,
"loss": 0.3183,
"step": 1300
},
{
"epoch": 1.6726403823178018,
"grad_norm": 0.1337890625,
"learning_rate": 0.0019795299412524946,
"loss": 0.3006,
"step": 1400
},
{
"epoch": 1.7921146953405018,
"grad_norm": 0.1494140625,
"learning_rate": 0.001968077118866204,
"loss": 0.2911,
"step": 1500
},
{
"epoch": 1.911589008363202,
"grad_norm": 0.1201171875,
"learning_rate": 0.001954139256400049,
"loss": 0.2853,
"step": 1600
},
{
"epoch": 2.031063321385902,
"grad_norm": 0.08154296875,
"learning_rate": 0.0019377521321470807,
"loss": 0.2585,
"step": 1700
},
{
"epoch": 2.150537634408602,
"grad_norm": 0.12890625,
"learning_rate": 0.0019189578116202307,
"loss": 0.2611,
"step": 1800
},
{
"epoch": 2.270011947431302,
"grad_norm": 0.125,
"learning_rate": 0.0018978045395707417,
"loss": 0.2502,
"step": 1900
},
{
"epoch": 2.3894862604540026,
"grad_norm": 0.1162109375,
"learning_rate": 0.0018743466161445822,
"loss": 0.2455,
"step": 2000
},
{
"epoch": 2.5089605734767026,
"grad_norm": 0.1181640625,
"learning_rate": 0.001848644257494751,
"loss": 0.2292,
"step": 2100
},
{
"epoch": 2.6284348864994027,
"grad_norm": 0.1318359375,
"learning_rate": 0.0018207634412072764,
"loss": 0.2199,
"step": 2200
},
{
"epoch": 2.7479091995221028,
"grad_norm": 0.12060546875,
"learning_rate": 0.0017907757369376985,
"loss": 0.203,
"step": 2300
},
{
"epoch": 2.867383512544803,
"grad_norm": 0.1064453125,
"learning_rate": 0.0017587581226927908,
"loss": 0.2035,
"step": 2400
},
{
"epoch": 2.986857825567503,
"grad_norm": 0.09228515625,
"learning_rate": 0.00172479278722912,
"loss": 0.1938,
"step": 2500
},
{
"epoch": 3.106332138590203,
"grad_norm": 0.095703125,
"learning_rate": 0.0016889669190756867,
"loss": 0.1868,
"step": 2600
},
{
"epoch": 3.225806451612903,
"grad_norm": 0.07666015625,
"learning_rate": 0.0016513724827222226,
"loss": 0.1884,
"step": 2700
},
{
"epoch": 3.3452807646356035,
"grad_norm": 0.08837890625,
"learning_rate": 0.001612105982547663,
"loss": 0.1772,
"step": 2800
},
{
"epoch": 3.4647550776583036,
"grad_norm": 0.10693359375,
"learning_rate": 0.0015712682150947923,
"loss": 0.1694,
"step": 2900
},
{
"epoch": 3.5842293906810037,
"grad_norm": 0.0927734375,
"learning_rate": 0.0015289640103269624,
"loss": 0.1616,
"step": 3000
},
{
"epoch": 3.7037037037037037,
"grad_norm": 0.06201171875,
"learning_rate": 0.0014853019625310814,
"loss": 0.1519,
"step": 3100
},
{
"epoch": 3.823178016726404,
"grad_norm": 0.2275390625,
"learning_rate": 0.0014403941515576345,
"loss": 0.1488,
"step": 3200
},
{
"epoch": 3.942652329749104,
"grad_norm": 0.09765625,
"learning_rate": 0.0013943558551133186,
"loss": 0.1455,
"step": 3300
},
{
"epoch": 4.062126642771804,
"grad_norm": 0.080078125,
"learning_rate": 0.0013473052528448202,
"loss": 0.1354,
"step": 3400
},
{
"epoch": 4.181600955794504,
"grad_norm": 0.08349609375,
"learning_rate": 0.0012993631229733583,
"loss": 0.1386,
"step": 3500
},
{
"epoch": 4.301075268817204,
"grad_norm": 0.095703125,
"learning_rate": 0.0012506525322587205,
"loss": 0.1355,
"step": 3600
},
{
"epoch": 4.4205495818399045,
"grad_norm": 0.06982421875,
"learning_rate": 0.0012012985200886602,
"loss": 0.1246,
"step": 3700
},
{
"epoch": 4.540023894862604,
"grad_norm": 0.07958984375,
"learning_rate": 0.0011514277775045768,
"loss": 0.1213,
"step": 3800
},
{
"epoch": 4.659498207885305,
"grad_norm": 0.0810546875,
"learning_rate": 0.0011011683219874322,
"loss": 0.1157,
"step": 3900
},
{
"epoch": 4.778972520908005,
"grad_norm": 0.07763671875,
"learning_rate": 0.0010506491688387128,
"loss": 0.1106,
"step": 4000
},
{
"epoch": 4.898446833930705,
"grad_norm": 0.06591796875,
"learning_rate": 0.001,
"loss": 0.109,
"step": 4100
},
{
"epoch": 5.017921146953405,
"grad_norm": 0.08935546875,
"learning_rate": 0.0009493508311612874,
"loss": 0.1051,
"step": 4200
},
{
"epoch": 5.137395459976105,
"grad_norm": 0.0771484375,
"learning_rate": 0.000898831678012568,
"loss": 0.1028,
"step": 4300
},
{
"epoch": 5.256869772998805,
"grad_norm": 0.08056640625,
"learning_rate": 0.0008485722224954237,
"loss": 0.1024,
"step": 4400
},
{
"epoch": 5.376344086021505,
"grad_norm": 0.08837890625,
"learning_rate": 0.0007987014799113398,
"loss": 0.0981,
"step": 4500
},
{
"epoch": 5.4958183990442055,
"grad_norm": 0.09228515625,
"learning_rate": 0.0007493474677412793,
"loss": 0.0937,
"step": 4600
},
{
"epoch": 5.615292712066906,
"grad_norm": 0.0712890625,
"learning_rate": 0.000700636877026642,
"loss": 0.0908,
"step": 4700
},
{
"epoch": 5.734767025089606,
"grad_norm": 0.06298828125,
"learning_rate": 0.0006526947471551798,
"loss": 0.0865,
"step": 4800
},
{
"epoch": 5.854241338112306,
"grad_norm": 0.06396484375,
"learning_rate": 0.0006056441448866817,
"loss": 0.0867,
"step": 4900
},
{
"epoch": 5.973715651135006,
"grad_norm": 0.056884765625,
"learning_rate": 0.0005596058484423656,
"loss": 0.0849,
"step": 5000
},
{
"epoch": 6.093189964157706,
"grad_norm": 0.06640625,
"learning_rate": 0.0005146980374689192,
"loss": 0.0812,
"step": 5100
},
{
"epoch": 6.212664277180406,
"grad_norm": 0.08984375,
"learning_rate": 0.0004710359896730378,
"loss": 0.0825,
"step": 5200
},
{
"epoch": 6.332138590203106,
"grad_norm": 0.057373046875,
"learning_rate": 0.00042873178490520746,
"loss": 0.0818,
"step": 5300
},
{
"epoch": 6.451612903225806,
"grad_norm": 0.057861328125,
"learning_rate": 0.0003878940174523371,
"loss": 0.0769,
"step": 5400
},
{
"epoch": 6.571087216248507,
"grad_norm": 0.05517578125,
"learning_rate": 0.00034862751727777797,
"loss": 0.0768,
"step": 5500
},
{
"epoch": 6.690561529271207,
"grad_norm": 0.056396484375,
"learning_rate": 0.00031103308092431337,
"loss": 0.0749,
"step": 5600
},
{
"epoch": 6.810035842293907,
"grad_norm": 0.044189453125,
"learning_rate": 0.00027520721277088026,
"loss": 0.0746,
"step": 5700
},
{
"epoch": 6.929510155316607,
"grad_norm": 0.041015625,
"learning_rate": 0.00024124187730720914,
"loss": 0.0748,
"step": 5800
},
{
"epoch": 7.048984468339307,
"grad_norm": 0.047607421875,
"learning_rate": 0.00020922426306230158,
"loss": 0.0736,
"step": 5900
},
{
"epoch": 7.168458781362007,
"grad_norm": 0.047607421875,
"learning_rate": 0.00017923655879272394,
"loss": 0.0743,
"step": 6000
},
{
"epoch": 7.287933094384707,
"grad_norm": 0.06787109375,
"learning_rate": 0.00015135574250524898,
"loss": 0.076,
"step": 6100
},
{
"epoch": 7.407407407407407,
"grad_norm": 0.06201171875,
"learning_rate": 0.00012565338385541792,
"loss": 0.0736,
"step": 6200
},
{
"epoch": 7.526881720430108,
"grad_norm": 0.047607421875,
"learning_rate": 0.00010219546042925842,
"loss": 0.0728,
"step": 6300
},
{
"epoch": 7.646356033452808,
"grad_norm": 0.045654296875,
"learning_rate": 8.10421883797694e-05,
"loss": 0.0725,
"step": 6400
},
{
"epoch": 7.765830346475508,
"grad_norm": 0.05126953125,
"learning_rate": 6.22478678529197e-05,
"loss": 0.0717,
"step": 6500
},
{
"epoch": 7.885304659498208,
"grad_norm": 0.046875,
"learning_rate": 4.5860743599951184e-05,
"loss": 0.0726,
"step": 6600
},
{
"epoch": 1.004778972520908,
"grad_norm": 0.050048828125,
"learning_rate": 3.192288113379582e-05,
"loss": 0.0732,
"step": 6700
},
{
"epoch": 1.124253285543608,
"grad_norm": 0.0498046875,
"learning_rate": 2.0470058747505516e-05,
"loss": 0.0724,
"step": 6800
},
{
"epoch": 1.2437275985663083,
"grad_norm": 0.050537109375,
"learning_rate": 1.153167567188862e-05,
"loss": 0.0748,
"step": 6900
},
{
"epoch": 1.3632019115890084,
"grad_norm": 0.04443359375,
"learning_rate": 5.1306766081048454e-06,
"loss": 0.0745,
"step": 7000
},
{
"epoch": 1.4826762246117084,
"grad_norm": 0.050048828125,
"learning_rate": 1.2834928289472415e-06,
"loss": 0.0717,
"step": 7100
},
{
"epoch": 1.6021505376344085,
"grad_norm": 0.0458984375,
"learning_rate": 0.0,
"loss": 0.073,
"step": 7200
}
],
"logging_steps": 100,
"max_steps": 7200,
"num_input_tokens_seen": 0,
"num_train_epochs": 9,
"save_steps": 2000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.018481533644288e+17,
"train_batch_size": 12,
"trial_name": null,
"trial_params": null
}