BaoLuo-LawAssistant-sftglm-6b / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.1204393023553411,
"global_step": 5000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 9.9e-06,
"loss": 3.2945,
"step": 50
},
{
"epoch": 0.0,
"learning_rate": 9.800000000000001e-06,
"loss": 3.1789,
"step": 100
},
{
"epoch": 0.0,
"learning_rate": 9.7e-06,
"loss": 3.2237,
"step": 150
},
{
"epoch": 0.0,
"learning_rate": 9.600000000000001e-06,
"loss": 3.2381,
"step": 200
},
{
"epoch": 0.01,
"learning_rate": 9.5e-06,
"loss": 3.2048,
"step": 250
},
{
"epoch": 0.01,
"learning_rate": 9.4e-06,
"loss": 3.2129,
"step": 300
},
{
"epoch": 0.01,
"learning_rate": 9.3e-06,
"loss": 3.218,
"step": 350
},
{
"epoch": 0.01,
"learning_rate": 9.200000000000002e-06,
"loss": 3.1789,
"step": 400
},
{
"epoch": 0.01,
"learning_rate": 9.100000000000001e-06,
"loss": 3.2067,
"step": 450
},
{
"epoch": 0.01,
"learning_rate": 9e-06,
"loss": 3.1704,
"step": 500
},
{
"epoch": 0.01,
"learning_rate": 8.900000000000001e-06,
"loss": 3.1543,
"step": 550
},
{
"epoch": 0.01,
"learning_rate": 8.8e-06,
"loss": 3.1896,
"step": 600
},
{
"epoch": 0.02,
"learning_rate": 8.700000000000001e-06,
"loss": 3.1658,
"step": 650
},
{
"epoch": 0.02,
"learning_rate": 8.6e-06,
"loss": 3.2246,
"step": 700
},
{
"epoch": 0.02,
"learning_rate": 8.5e-06,
"loss": 3.1984,
"step": 750
},
{
"epoch": 0.02,
"learning_rate": 8.400000000000001e-06,
"loss": 3.1704,
"step": 800
},
{
"epoch": 0.02,
"learning_rate": 8.3e-06,
"loss": 3.2175,
"step": 850
},
{
"epoch": 0.02,
"learning_rate": 8.2e-06,
"loss": 3.1687,
"step": 900
},
{
"epoch": 0.02,
"learning_rate": 8.1e-06,
"loss": 3.1709,
"step": 950
},
{
"epoch": 0.02,
"learning_rate": 8.000000000000001e-06,
"loss": 3.1268,
"step": 1000
},
{
"epoch": 0.03,
"learning_rate": 7.9e-06,
"loss": 3.1634,
"step": 1050
},
{
"epoch": 0.03,
"learning_rate": 7.800000000000002e-06,
"loss": 3.0684,
"step": 1100
},
{
"epoch": 0.03,
"learning_rate": 7.7e-06,
"loss": 3.1026,
"step": 1150
},
{
"epoch": 0.03,
"learning_rate": 7.600000000000001e-06,
"loss": 3.0981,
"step": 1200
},
{
"epoch": 0.03,
"learning_rate": 7.500000000000001e-06,
"loss": 3.1482,
"step": 1250
},
{
"epoch": 0.03,
"learning_rate": 7.4e-06,
"loss": 3.0771,
"step": 1300
},
{
"epoch": 0.03,
"learning_rate": 7.3e-06,
"loss": 3.1484,
"step": 1350
},
{
"epoch": 0.03,
"learning_rate": 7.2000000000000005e-06,
"loss": 3.0779,
"step": 1400
},
{
"epoch": 0.03,
"learning_rate": 7.100000000000001e-06,
"loss": 3.1395,
"step": 1450
},
{
"epoch": 0.04,
"learning_rate": 7e-06,
"loss": 3.1112,
"step": 1500
},
{
"epoch": 0.04,
"learning_rate": 6.9e-06,
"loss": 3.087,
"step": 1550
},
{
"epoch": 0.04,
"learning_rate": 6.800000000000001e-06,
"loss": 3.1027,
"step": 1600
},
{
"epoch": 0.04,
"learning_rate": 6.700000000000001e-06,
"loss": 3.0695,
"step": 1650
},
{
"epoch": 0.04,
"learning_rate": 6.600000000000001e-06,
"loss": 3.1138,
"step": 1700
},
{
"epoch": 0.04,
"learning_rate": 6.5000000000000004e-06,
"loss": 3.1111,
"step": 1750
},
{
"epoch": 0.04,
"learning_rate": 6.4000000000000006e-06,
"loss": 3.0658,
"step": 1800
},
{
"epoch": 0.04,
"learning_rate": 6.300000000000001e-06,
"loss": 3.0523,
"step": 1850
},
{
"epoch": 0.05,
"learning_rate": 6.200000000000001e-06,
"loss": 3.0924,
"step": 1900
},
{
"epoch": 0.05,
"learning_rate": 6.1e-06,
"loss": 3.0793,
"step": 1950
},
{
"epoch": 0.05,
"learning_rate": 6e-06,
"loss": 3.0692,
"step": 2000
},
{
"epoch": 0.05,
"learning_rate": 5.9e-06,
"loss": 3.1456,
"step": 2050
},
{
"epoch": 0.05,
"learning_rate": 5.8e-06,
"loss": 3.0418,
"step": 2100
},
{
"epoch": 0.05,
"learning_rate": 5.7e-06,
"loss": 3.0855,
"step": 2150
},
{
"epoch": 0.05,
"learning_rate": 5.600000000000001e-06,
"loss": 3.074,
"step": 2200
},
{
"epoch": 0.05,
"learning_rate": 5.500000000000001e-06,
"loss": 3.0617,
"step": 2250
},
{
"epoch": 0.06,
"learning_rate": 5.400000000000001e-06,
"loss": 3.0639,
"step": 2300
},
{
"epoch": 0.06,
"learning_rate": 5.300000000000001e-06,
"loss": 3.0636,
"step": 2350
},
{
"epoch": 0.06,
"learning_rate": 5.2e-06,
"loss": 2.9724,
"step": 2400
},
{
"epoch": 0.06,
"learning_rate": 5.1e-06,
"loss": 3.0637,
"step": 2450
},
{
"epoch": 0.06,
"learning_rate": 5e-06,
"loss": 3.0511,
"step": 2500
},
{
"epoch": 0.06,
"learning_rate": 4.9000000000000005e-06,
"loss": 3.0514,
"step": 2550
},
{
"epoch": 0.06,
"learning_rate": 4.800000000000001e-06,
"loss": 3.0162,
"step": 2600
},
{
"epoch": 0.06,
"learning_rate": 4.7e-06,
"loss": 3.0779,
"step": 2650
},
{
"epoch": 0.07,
"learning_rate": 4.600000000000001e-06,
"loss": 3.031,
"step": 2700
},
{
"epoch": 0.07,
"learning_rate": 4.5e-06,
"loss": 3.092,
"step": 2750
},
{
"epoch": 0.07,
"learning_rate": 4.4e-06,
"loss": 3.0638,
"step": 2800
},
{
"epoch": 0.07,
"learning_rate": 4.3e-06,
"loss": 3.0483,
"step": 2850
},
{
"epoch": 0.07,
"learning_rate": 4.2000000000000004e-06,
"loss": 2.992,
"step": 2900
},
{
"epoch": 0.07,
"learning_rate": 4.1e-06,
"loss": 3.0777,
"step": 2950
},
{
"epoch": 0.07,
"learning_rate": 4.000000000000001e-06,
"loss": 3.0417,
"step": 3000
},
{
"epoch": 0.07,
"learning_rate": 3.900000000000001e-06,
"loss": 2.976,
"step": 3050
},
{
"epoch": 0.07,
"learning_rate": 3.8000000000000005e-06,
"loss": 3.0749,
"step": 3100
},
{
"epoch": 0.08,
"learning_rate": 3.7e-06,
"loss": 3.085,
"step": 3150
},
{
"epoch": 0.08,
"learning_rate": 3.6000000000000003e-06,
"loss": 3.0375,
"step": 3200
},
{
"epoch": 0.08,
"learning_rate": 3.5e-06,
"loss": 3.0349,
"step": 3250
},
{
"epoch": 0.08,
"learning_rate": 3.4000000000000005e-06,
"loss": 3.0532,
"step": 3300
},
{
"epoch": 0.08,
"learning_rate": 3.3000000000000006e-06,
"loss": 3.1154,
"step": 3350
},
{
"epoch": 0.08,
"learning_rate": 3.2000000000000003e-06,
"loss": 2.9963,
"step": 3400
},
{
"epoch": 0.08,
"learning_rate": 3.1000000000000004e-06,
"loss": 3.0137,
"step": 3450
},
{
"epoch": 0.08,
"learning_rate": 3e-06,
"loss": 3.0581,
"step": 3500
},
{
"epoch": 0.09,
"learning_rate": 2.9e-06,
"loss": 2.9854,
"step": 3550
},
{
"epoch": 0.09,
"learning_rate": 2.8000000000000003e-06,
"loss": 2.9893,
"step": 3600
},
{
"epoch": 0.09,
"learning_rate": 2.7000000000000004e-06,
"loss": 2.9317,
"step": 3650
},
{
"epoch": 0.09,
"learning_rate": 2.6e-06,
"loss": 3.0318,
"step": 3700
},
{
"epoch": 0.09,
"learning_rate": 2.5e-06,
"loss": 3.0336,
"step": 3750
},
{
"epoch": 0.09,
"learning_rate": 2.4000000000000003e-06,
"loss": 3.0419,
"step": 3800
},
{
"epoch": 0.09,
"learning_rate": 2.3000000000000004e-06,
"loss": 3.0265,
"step": 3850
},
{
"epoch": 0.09,
"learning_rate": 2.2e-06,
"loss": 2.9753,
"step": 3900
},
{
"epoch": 0.1,
"learning_rate": 2.1000000000000002e-06,
"loss": 3.021,
"step": 3950
},
{
"epoch": 0.1,
"learning_rate": 2.0000000000000003e-06,
"loss": 3.0237,
"step": 4000
},
{
"epoch": 0.1,
"learning_rate": 1.9000000000000002e-06,
"loss": 2.9938,
"step": 4050
},
{
"epoch": 0.1,
"learning_rate": 1.8000000000000001e-06,
"loss": 3.0129,
"step": 4100
},
{
"epoch": 0.1,
"learning_rate": 1.7000000000000002e-06,
"loss": 2.9862,
"step": 4150
},
{
"epoch": 0.1,
"learning_rate": 1.6000000000000001e-06,
"loss": 3.041,
"step": 4200
},
{
"epoch": 0.1,
"learning_rate": 1.5e-06,
"loss": 3.0256,
"step": 4250
},
{
"epoch": 0.1,
"learning_rate": 1.4000000000000001e-06,
"loss": 2.9721,
"step": 4300
},
{
"epoch": 0.1,
"learning_rate": 1.3e-06,
"loss": 3.0076,
"step": 4350
},
{
"epoch": 0.11,
"learning_rate": 1.2000000000000002e-06,
"loss": 3.0082,
"step": 4400
},
{
"epoch": 0.11,
"learning_rate": 1.1e-06,
"loss": 3.0354,
"step": 4450
},
{
"epoch": 0.11,
"learning_rate": 1.0000000000000002e-06,
"loss": 2.9866,
"step": 4500
},
{
"epoch": 0.11,
"learning_rate": 9.000000000000001e-07,
"loss": 3.0599,
"step": 4550
},
{
"epoch": 0.11,
"learning_rate": 8.000000000000001e-07,
"loss": 2.9539,
"step": 4600
},
{
"epoch": 0.11,
"learning_rate": 7.000000000000001e-07,
"loss": 2.96,
"step": 4650
},
{
"epoch": 0.11,
"learning_rate": 6.000000000000001e-07,
"loss": 3.0198,
"step": 4700
},
{
"epoch": 0.11,
"learning_rate": 5.000000000000001e-07,
"loss": 3.0299,
"step": 4750
},
{
"epoch": 0.12,
"learning_rate": 4.0000000000000003e-07,
"loss": 2.9731,
"step": 4800
},
{
"epoch": 0.12,
"learning_rate": 3.0000000000000004e-07,
"loss": 3.0531,
"step": 4850
},
{
"epoch": 0.12,
"learning_rate": 2.0000000000000002e-07,
"loss": 2.9388,
"step": 4900
},
{
"epoch": 0.12,
"learning_rate": 1.0000000000000001e-07,
"loss": 3.0682,
"step": 4950
},
{
"epoch": 0.12,
"learning_rate": 0.0,
"loss": 2.9654,
"step": 5000
}
],
"max_steps": 5000,
"num_train_epochs": 1,
"total_flos": 1.38662884933632e+18,
"trial_name": null,
"trial_params": null
}
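
The JSON above is the checkpoint state written by the Hugging Face Transformers Trainer: each log_history entry records the epoch fraction, learning rate, and training loss every 50 steps, and the run stops at max_steps = 5000 (about 12% of one epoch). As a minimal sketch, assuming the file is saved locally as trainer_state.json and that matplotlib is available for plotting, the loss curve can be read back like this:

import json

import matplotlib.pyplot as plt

# Assumed local path to the file shown above.
with open("trainer_state.json", "r", encoding="utf-8") as f:
    state = json.load(f)

# Each log_history entry carries epoch, learning_rate, loss, and step.
records = [entry for entry in state["log_history"] if "loss" in entry]
steps = [entry["step"] for entry in records]
losses = [entry["loss"] for entry in records]

plt.plot(steps, losses)
plt.xlabel("step")
plt.ylabel("training loss")
plt.title("BaoLuo-LawAssistant-sftglm-6b SFT loss")
plt.show()

With this data, the loss drifts from roughly 3.29 at step 50 down to about 2.97 at step 5000, consistent with a linearly decaying learning rate that reaches 0 at the final step.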