{
"best_metric": 2.269778251647949,
"best_model_checkpoint": "../../saves/Qwen1.5-7B-Chat/lora/sft/checkpoint-1200",
"epoch": 7.111111111111111,
"eval_steps": 400,
"global_step": 1200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.06,
"grad_norm": 2.701119899749756,
"learning_rate": 2.5e-05,
"loss": 4.3102,
"step": 10
},
{
"epoch": 0.12,
"grad_norm": 3.890558958053589,
"learning_rate": 5e-05,
"loss": 3.8998,
"step": 20
},
{
"epoch": 0.18,
"grad_norm": 1.3287036418914795,
"learning_rate": 4.999552306674344e-05,
"loss": 3.241,
"step": 30
},
{
"epoch": 0.24,
"grad_norm": 0.6666322946548462,
"learning_rate": 4.998209387040829e-05,
"loss": 2.677,
"step": 40
},
{
"epoch": 0.3,
"grad_norm": 0.6145800352096558,
"learning_rate": 4.9959717220723784e-05,
"loss": 2.6172,
"step": 50
},
{
"epoch": 0.36,
"grad_norm": 0.5234311819076538,
"learning_rate": 4.9928401131991305e-05,
"loss": 2.5621,
"step": 60
},
{
"epoch": 0.41,
"grad_norm": 0.6433086395263672,
"learning_rate": 4.9888156820213974e-05,
"loss": 2.587,
"step": 70
},
{
"epoch": 0.47,
"grad_norm": 0.5955637693405151,
"learning_rate": 4.9838998699079625e-05,
"loss": 2.6315,
"step": 80
},
{
"epoch": 0.53,
"grad_norm": 0.5844753980636597,
"learning_rate": 4.9780944374798435e-05,
"loss": 2.5056,
"step": 90
},
{
"epoch": 0.59,
"grad_norm": 0.6106901168823242,
"learning_rate": 4.971401463979721e-05,
"loss": 2.5099,
"step": 100
},
{
"epoch": 0.65,
"grad_norm": 0.7099446654319763,
"learning_rate": 4.963823346527248e-05,
"loss": 2.4891,
"step": 110
},
{
"epoch": 0.71,
"grad_norm": 0.6092314720153809,
"learning_rate": 4.9553627992605066e-05,
"loss": 2.546,
"step": 120
},
{
"epoch": 0.77,
"grad_norm": 0.5691718459129333,
"learning_rate": 4.946022852363932e-05,
"loss": 2.4562,
"step": 130
},
{
"epoch": 0.83,
"grad_norm": 0.7633320689201355,
"learning_rate": 4.9358068509830334e-05,
"loss": 2.5554,
"step": 140
},
{
"epoch": 0.89,
"grad_norm": 0.7102218866348267,
"learning_rate": 4.924718454026318e-05,
"loss": 2.5479,
"step": 150
},
{
"epoch": 0.95,
"grad_norm": 0.7560188174247742,
"learning_rate": 4.912761632854833e-05,
"loss": 2.4747,
"step": 160
},
{
"epoch": 1.01,
"grad_norm": 0.6717157959938049,
"learning_rate": 4.8999406698598074e-05,
"loss": 2.5173,
"step": 170
},
{
"epoch": 1.07,
"grad_norm": 0.6935433745384216,
"learning_rate": 4.886260156928888e-05,
"loss": 2.4288,
"step": 180
},
{
"epoch": 1.13,
"grad_norm": 1.0115560293197632,
"learning_rate": 4.8717249938015415e-05,
"loss": 2.5331,
"step": 190
},
{
"epoch": 1.19,
"grad_norm": 0.7541220784187317,
"learning_rate": 4.856340386314182e-05,
"loss": 2.4577,
"step": 200
},
{
"epoch": 1.24,
"grad_norm": 0.8513575196266174,
"learning_rate": 4.840111844535682e-05,
"loss": 2.496,
"step": 210
},
{
"epoch": 1.3,
"grad_norm": 0.7479698061943054,
"learning_rate": 4.8230451807939135e-05,
"loss": 2.4142,
"step": 220
},
{
"epoch": 1.36,
"grad_norm": 0.7310675978660583,
"learning_rate": 4.8051465075940336e-05,
"loss": 2.4913,
"step": 230
},
{
"epoch": 1.42,
"grad_norm": 0.8907930850982666,
"learning_rate": 4.786422235429269e-05,
"loss": 2.3697,
"step": 240
},
{
"epoch": 1.48,
"grad_norm": 0.9348196387290955,
"learning_rate": 4.766879070484956e-05,
"loss": 2.4631,
"step": 250
},
{
"epoch": 1.54,
"grad_norm": 0.9181344509124756,
"learning_rate": 4.746524012236706e-05,
"loss": 2.4411,
"step": 260
},
{
"epoch": 1.6,
"grad_norm": 0.7747082710266113,
"learning_rate": 4.725364350943492e-05,
"loss": 2.3592,
"step": 270
},
{
"epoch": 1.66,
"grad_norm": 0.9620688557624817,
"learning_rate": 4.703407665036622e-05,
"loss": 2.3433,
"step": 280
},
{
"epoch": 1.72,
"grad_norm": 1.016157865524292,
"learning_rate": 4.680661818405485e-05,
"loss": 2.3396,
"step": 290
},
{
"epoch": 1.78,
"grad_norm": 0.8236090540885925,
"learning_rate": 4.657134957581057e-05,
"loss": 2.3637,
"step": 300
},
{
"epoch": 1.84,
"grad_norm": 0.9651027321815491,
"learning_rate": 4.6328355088181915e-05,
"loss": 2.3831,
"step": 310
},
{
"epoch": 1.9,
"grad_norm": 0.7949943542480469,
"learning_rate": 4.607772175077711e-05,
"loss": 2.4422,
"step": 320
},
{
"epoch": 1.96,
"grad_norm": 0.9031904935836792,
"learning_rate": 4.581953932909403e-05,
"loss": 2.4595,
"step": 330
},
{
"epoch": 2.01,
"grad_norm": 0.859309732913971,
"learning_rate": 4.555390029237026e-05,
"loss": 2.3967,
"step": 340
},
{
"epoch": 2.07,
"grad_norm": 1.1613001823425293,
"learning_rate": 4.528089978046481e-05,
"loss": 2.3132,
"step": 350
},
{
"epoch": 2.13,
"grad_norm": 0.9616575837135315,
"learning_rate": 4.500063556978337e-05,
"loss": 2.4425,
"step": 360
},
{
"epoch": 2.19,
"grad_norm": 0.7751485109329224,
"learning_rate": 4.471320803825915e-05,
"loss": 2.3591,
"step": 370
},
{
"epoch": 2.25,
"grad_norm": 1.3079149723052979,
"learning_rate": 4.441872012940214e-05,
"loss": 2.3323,
"step": 380
},
{
"epoch": 2.31,
"grad_norm": 0.8369519710540771,
"learning_rate": 4.4117277315429366e-05,
"loss": 2.5005,
"step": 390
},
{
"epoch": 2.37,
"grad_norm": 1.0954837799072266,
"learning_rate": 4.380898755948953e-05,
"loss": 2.3058,
"step": 400
},
{
"epoch": 2.37,
"eval_loss": 2.299175500869751,
"eval_runtime": 74.4017,
"eval_samples_per_second": 8.064,
"eval_steps_per_second": 4.032,
"step": 400
},
{
"epoch": 2.43,
"grad_norm": 1.0771162509918213,
"learning_rate": 4.349396127699552e-05,
"loss": 2.4145,
"step": 410
},
{
"epoch": 2.49,
"grad_norm": 0.7209674119949341,
"learning_rate": 4.3172311296078595e-05,
"loss": 2.3519,
"step": 420
},
{
"epoch": 2.55,
"grad_norm": 1.0076887607574463,
"learning_rate": 4.284415281717847e-05,
"loss": 2.3699,
"step": 430
},
{
"epoch": 2.61,
"grad_norm": 0.9578609466552734,
"learning_rate": 4.250960337178377e-05,
"loss": 2.3923,
"step": 440
},
{
"epoch": 2.67,
"grad_norm": 0.9023878574371338,
"learning_rate": 4.216878278033753e-05,
"loss": 2.3618,
"step": 450
},
{
"epoch": 2.73,
"grad_norm": 1.1171501874923706,
"learning_rate": 4.1821813109322974e-05,
"loss": 2.4161,
"step": 460
},
{
"epoch": 2.79,
"grad_norm": 1.2200597524642944,
"learning_rate": 4.1468818627544845e-05,
"loss": 2.3592,
"step": 470
},
{
"epoch": 2.84,
"grad_norm": 0.9814207553863525,
"learning_rate": 4.1109925761621925e-05,
"loss": 2.4143,
"step": 480
},
{
"epoch": 2.9,
"grad_norm": 1.2403128147125244,
"learning_rate": 4.0745263050706784e-05,
"loss": 2.3558,
"step": 490
},
{
"epoch": 2.96,
"grad_norm": 0.9680503606796265,
"learning_rate": 4.037496110044884e-05,
"loss": 2.3103,
"step": 500
},
{
"epoch": 3.02,
"grad_norm": 0.9476011991500854,
"learning_rate": 3.999915253621739e-05,
"loss": 2.369,
"step": 510
},
{
"epoch": 3.08,
"grad_norm": 0.9985129237174988,
"learning_rate": 3.961797195560118e-05,
"loss": 2.2797,
"step": 520
},
{
"epoch": 3.14,
"grad_norm": 1.1295677423477173,
"learning_rate": 3.9231555880201655e-05,
"loss": 2.3331,
"step": 530
},
{
"epoch": 3.2,
"grad_norm": 0.854465126991272,
"learning_rate": 3.8840042706737114e-05,
"loss": 2.2353,
"step": 540
},
{
"epoch": 3.26,
"grad_norm": 0.9416671991348267,
"learning_rate": 3.8443572657475304e-05,
"loss": 2.285,
"step": 550
},
{
"epoch": 3.32,
"grad_norm": 1.1753567457199097,
"learning_rate": 3.804228773001212e-05,
"loss": 2.3359,
"step": 560
},
{
"epoch": 3.38,
"grad_norm": 1.0389409065246582,
"learning_rate": 3.7636331646414524e-05,
"loss": 2.3567,
"step": 570
},
{
"epoch": 3.44,
"grad_norm": 1.197994589805603,
"learning_rate": 3.7225849801745835e-05,
"loss": 2.3137,
"step": 580
},
{
"epoch": 3.5,
"grad_norm": 1.1205960512161255,
"learning_rate": 3.6810989211991774e-05,
"loss": 2.3268,
"step": 590
},
{
"epoch": 3.56,
"grad_norm": 1.1302969455718994,
"learning_rate": 3.639189846140604e-05,
"loss": 2.4254,
"step": 600
},
{
"epoch": 3.61,
"grad_norm": 1.1475387811660767,
"learning_rate": 3.596872764929413e-05,
"loss": 2.3282,
"step": 610
},
{
"epoch": 3.67,
"grad_norm": 1.2079755067825317,
"learning_rate": 3.55416283362546e-05,
"loss": 2.2859,
"step": 620
},
{
"epoch": 3.73,
"grad_norm": 1.274043321609497,
"learning_rate": 3.511075348989692e-05,
"loss": 2.3619,
"step": 630
},
{
"epoch": 3.79,
"grad_norm": 1.3108967542648315,
"learning_rate": 3.4676257430055434e-05,
"loss": 2.202,
"step": 640
},
{
"epoch": 3.85,
"grad_norm": 1.1571152210235596,
"learning_rate": 3.4238295773518924e-05,
"loss": 2.3114,
"step": 650
},
{
"epoch": 3.91,
"grad_norm": 1.2922154664993286,
"learning_rate": 3.379702537829583e-05,
"loss": 2.3175,
"step": 660
},
{
"epoch": 3.97,
"grad_norm": 0.8678541779518127,
"learning_rate": 3.335260428743475e-05,
"loss": 2.4348,
"step": 670
},
{
"epoch": 4.03,
"grad_norm": 1.2109763622283936,
"learning_rate": 3.29051916724206e-05,
"loss": 2.3685,
"step": 680
},
{
"epoch": 4.09,
"grad_norm": 1.21372389793396,
"learning_rate": 3.2454947776166636e-05,
"loss": 2.1795,
"step": 690
},
{
"epoch": 4.15,
"grad_norm": 1.2954655885696411,
"learning_rate": 3.200203385562268e-05,
"loss": 2.2299,
"step": 700
},
{
"epoch": 4.21,
"grad_norm": 1.4054641723632812,
"learning_rate": 3.154661212402017e-05,
"loss": 2.3114,
"step": 710
},
{
"epoch": 4.27,
"grad_norm": 1.2466893196105957,
"learning_rate": 3.10888456927748e-05,
"loss": 2.2702,
"step": 720
},
{
"epoch": 4.33,
"grad_norm": 1.0978355407714844,
"learning_rate": 3.0628898513067353e-05,
"loss": 2.2537,
"step": 730
},
{
"epoch": 4.39,
"grad_norm": 1.3766223192214966,
"learning_rate": 3.0166935317123823e-05,
"loss": 2.3131,
"step": 740
},
{
"epoch": 4.44,
"grad_norm": 1.152358889579773,
"learning_rate": 2.9703121559215845e-05,
"loss": 2.38,
"step": 750
},
{
"epoch": 4.5,
"grad_norm": 1.3584524393081665,
"learning_rate": 2.923762335640242e-05,
"loss": 2.3432,
"step": 760
},
{
"epoch": 4.56,
"grad_norm": 1.3569154739379883,
"learning_rate": 2.8770607429034352e-05,
"loss": 2.2632,
"step": 770
},
{
"epoch": 4.62,
"grad_norm": 1.4426939487457275,
"learning_rate": 2.8302241041042565e-05,
"loss": 2.2501,
"step": 780
},
{
"epoch": 4.68,
"grad_norm": 1.2213878631591797,
"learning_rate": 2.783269194003175e-05,
"loss": 2.3787,
"step": 790
},
{
"epoch": 4.74,
"grad_norm": 1.5808135271072388,
"learning_rate": 2.7362128297200785e-05,
"loss": 2.3647,
"step": 800
},
{
"epoch": 4.74,
"eval_loss": 2.2738687992095947,
"eval_runtime": 80.6321,
"eval_samples_per_second": 7.441,
"eval_steps_per_second": 3.721,
"step": 800
},
{
"epoch": 4.8,
"grad_norm": 1.2698041200637817,
"learning_rate": 2.6890718647111422e-05,
"loss": 2.2942,
"step": 810
},
{
"epoch": 4.86,
"grad_norm": 1.2770848274230957,
"learning_rate": 2.6418631827326857e-05,
"loss": 2.347,
"step": 820
},
{
"epoch": 4.92,
"grad_norm": 1.2166974544525146,
"learning_rate": 2.5946036917941762e-05,
"loss": 2.2969,
"step": 830
},
{
"epoch": 4.98,
"grad_norm": 1.4275230169296265,
"learning_rate": 2.5473103181025476e-05,
"loss": 2.2556,
"step": 840
},
{
"epoch": 5.04,
"grad_norm": 1.2852686643600464,
"learning_rate": 2.5e-05,
"loss": 2.2781,
"step": 850
},
{
"epoch": 5.1,
"grad_norm": 1.6887468099594116,
"learning_rate": 2.4526896818974533e-05,
"loss": 2.2843,
"step": 860
},
{
"epoch": 5.16,
"grad_norm": 1.328687310218811,
"learning_rate": 2.4053963082058244e-05,
"loss": 2.229,
"step": 870
},
{
"epoch": 5.21,
"grad_norm": 1.3591057062149048,
"learning_rate": 2.3581368172673152e-05,
"loss": 2.2851,
"step": 880
},
{
"epoch": 5.27,
"grad_norm": 1.3400282859802246,
"learning_rate": 2.310928135288859e-05,
"loss": 2.18,
"step": 890
},
{
"epoch": 5.33,
"grad_norm": 1.3457157611846924,
"learning_rate": 2.263787170279922e-05,
"loss": 2.2501,
"step": 900
},
{
"epoch": 5.39,
"grad_norm": 1.5690137147903442,
"learning_rate": 2.2167308059968254e-05,
"loss": 2.2682,
"step": 910
},
{
"epoch": 5.45,
"grad_norm": 1.429304599761963,
"learning_rate": 2.1697758958957448e-05,
"loss": 2.3144,
"step": 920
},
{
"epoch": 5.51,
"grad_norm": 1.4117275476455688,
"learning_rate": 2.1229392570965657e-05,
"loss": 2.2955,
"step": 930
},
{
"epoch": 5.57,
"grad_norm": 1.7679541110992432,
"learning_rate": 2.0762376643597582e-05,
"loss": 2.2117,
"step": 940
},
{
"epoch": 5.63,
"grad_norm": 1.4710173606872559,
"learning_rate": 2.029687844078416e-05,
"loss": 2.3107,
"step": 950
},
{
"epoch": 5.69,
"grad_norm": 1.2774254083633423,
"learning_rate": 1.9833064682876176e-05,
"loss": 2.2242,
"step": 960
},
{
"epoch": 5.75,
"grad_norm": 1.5879724025726318,
"learning_rate": 1.937110148693265e-05,
"loss": 2.2299,
"step": 970
},
{
"epoch": 5.81,
"grad_norm": 1.3109989166259766,
"learning_rate": 1.8911154307225203e-05,
"loss": 2.3555,
"step": 980
},
{
"epoch": 5.87,
"grad_norm": 1.6713744401931763,
"learning_rate": 1.8453387875979834e-05,
"loss": 2.2603,
"step": 990
},
{
"epoch": 5.93,
"grad_norm": 1.6011615991592407,
"learning_rate": 1.7997966144377325e-05,
"loss": 2.2731,
"step": 1000
},
{
"epoch": 5.99,
"grad_norm": 1.4683603048324585,
"learning_rate": 1.754505222383337e-05,
"loss": 2.2876,
"step": 1010
},
{
"epoch": 6.04,
"grad_norm": 1.3678452968597412,
"learning_rate": 1.70948083275794e-05,
"loss": 2.3123,
"step": 1020
},
{
"epoch": 6.1,
"grad_norm": 1.4179573059082031,
"learning_rate": 1.6647395712565256e-05,
"loss": 2.1601,
"step": 1030
},
{
"epoch": 6.16,
"grad_norm": 1.4710493087768555,
"learning_rate": 1.6202974621704175e-05,
"loss": 2.1918,
"step": 1040
},
{
"epoch": 6.22,
"grad_norm": 1.5775597095489502,
"learning_rate": 1.576170422648108e-05,
"loss": 2.3438,
"step": 1050
},
{
"epoch": 6.28,
"grad_norm": 1.7641725540161133,
"learning_rate": 1.5323742569944572e-05,
"loss": 2.1773,
"step": 1060
},
{
"epoch": 6.34,
"grad_norm": 1.5010628700256348,
"learning_rate": 1.4889246510103077e-05,
"loss": 2.2014,
"step": 1070
},
{
"epoch": 6.4,
"grad_norm": 1.5155980587005615,
"learning_rate": 1.4458371663745401e-05,
"loss": 2.2616,
"step": 1080
},
{
"epoch": 6.46,
"grad_norm": 1.337975025177002,
"learning_rate": 1.4031272350705871e-05,
"loss": 2.2648,
"step": 1090
},
{
"epoch": 6.52,
"grad_norm": 1.788812518119812,
"learning_rate": 1.3608101538593965e-05,
"loss": 2.2841,
"step": 1100
},
{
"epoch": 6.58,
"grad_norm": 1.6673487424850464,
"learning_rate": 1.3189010788008233e-05,
"loss": 2.3188,
"step": 1110
},
{
"epoch": 6.64,
"grad_norm": 1.4954336881637573,
"learning_rate": 1.277415019825417e-05,
"loss": 2.2268,
"step": 1120
},
{
"epoch": 6.7,
"grad_norm": 1.5983794927597046,
"learning_rate": 1.2363668353585487e-05,
"loss": 2.2214,
"step": 1130
},
{
"epoch": 6.76,
"grad_norm": 1.6327165365219116,
"learning_rate": 1.195771226998789e-05,
"loss": 2.2109,
"step": 1140
},
{
"epoch": 6.81,
"grad_norm": 1.7117351293563843,
"learning_rate": 1.1556427342524698e-05,
"loss": 2.3153,
"step": 1150
},
{
"epoch": 6.87,
"grad_norm": 1.8630892038345337,
"learning_rate": 1.1159957293262888e-05,
"loss": 2.216,
"step": 1160
},
{
"epoch": 6.93,
"grad_norm": 1.6396968364715576,
"learning_rate": 1.0768444119798357e-05,
"loss": 2.2552,
"step": 1170
},
{
"epoch": 6.99,
"grad_norm": 1.5091443061828613,
"learning_rate": 1.0382028044398822e-05,
"loss": 2.2435,
"step": 1180
},
{
"epoch": 7.05,
"grad_norm": 1.4590834379196167,
"learning_rate": 1.0000847463782615e-05,
"loss": 2.2492,
"step": 1190
},
{
"epoch": 7.11,
"grad_norm": 1.8363531827926636,
"learning_rate": 9.625038899551161e-06,
"loss": 2.1918,
"step": 1200
},
{
"epoch": 7.11,
"eval_loss": 2.269778251647949,
"eval_runtime": 74.1482,
"eval_samples_per_second": 8.092,
"eval_steps_per_second": 4.046,
"step": 1200
}
],
"logging_steps": 10,
"max_steps": 1680,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 400,
"total_flos": 1.1235201502895145e+18,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}