swagat-panda committed on
Commit
75658bb
1 Parent(s): daf6642

Upload trainer_state.json

Files changed (1)
  1. trainer_state.json +130 -0
trainer_state.json ADDED
@@ -0,0 +1,130 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 2.0,
+   "global_step": 9820,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.1,
+       "learning_rate": 4.9693251533742335e-05,
+       "loss": 20966.374,
+       "step": 500
+     },
+     {
+       "epoch": 0.2,
+       "learning_rate": 4.918200408997955e-05,
+       "loss": 2277.2115,
+       "step": 1000
+     },
+     {
+       "epoch": 0.31,
+       "learning_rate": 4.867075664621677e-05,
+       "loss": 549.5879,
+       "step": 1500
+     },
+     {
+       "epoch": 0.41,
+       "learning_rate": 4.815950920245399e-05,
+       "loss": 283.1544,
+       "step": 2000
+     },
+     {
+       "epoch": 0.51,
+       "learning_rate": 4.764826175869121e-05,
+       "loss": 184.1967,
+       "step": 2500
+     },
+     {
+       "epoch": 0.61,
+       "learning_rate": 4.7137014314928426e-05,
+       "loss": 137.0529,
+       "step": 3000
+     },
+     {
+       "epoch": 0.71,
+       "learning_rate": 4.6625766871165645e-05,
+       "loss": 114.6247,
+       "step": 3500
+     },
+     {
+       "epoch": 0.81,
+       "learning_rate": 4.611451942740286e-05,
+       "loss": 102.8662,
+       "step": 4000
+     },
+     {
+       "epoch": 0.92,
+       "learning_rate": 4.560327198364008e-05,
+       "loss": 97.2861,
+       "step": 4500
+     },
+     {
+       "epoch": 1.02,
+       "learning_rate": 4.5092024539877307e-05,
+       "loss": 87.1393,
+       "step": 5000
+     },
+     {
+       "epoch": 1.12,
+       "learning_rate": 4.4580777096114525e-05,
+       "loss": 73.0929,
+       "step": 5500
+     },
+     {
+       "epoch": 1.22,
+       "learning_rate": 4.4069529652351736e-05,
+       "loss": 70.3143,
+       "step": 6000
+     },
+     {
+       "epoch": 1.32,
+       "learning_rate": 4.3558282208588955e-05,
+       "loss": 67.2985,
+       "step": 6500
+     },
+     {
+       "epoch": 1.43,
+       "learning_rate": 4.304703476482618e-05,
+       "loss": 69.378,
+       "step": 7000
+     },
+     {
+       "epoch": 1.53,
+       "learning_rate": 4.25357873210634e-05,
+       "loss": 64.2011,
+       "step": 7500
+     },
+     {
+       "epoch": 1.63,
+       "learning_rate": 4.2024539877300617e-05,
+       "loss": 63.1896,
+       "step": 8000
+     },
+     {
+       "epoch": 1.73,
+       "learning_rate": 4.1513292433537835e-05,
+       "loss": 63.1367,
+       "step": 8500
+     },
+     {
+       "epoch": 1.83,
+       "learning_rate": 4.100204498977505e-05,
+       "loss": 60.7672,
+       "step": 9000
+     },
+     {
+       "epoch": 1.93,
+       "learning_rate": 4.049079754601227e-05,
+       "loss": 61.0841,
+       "step": 9500
+     }
+   ],
+   "max_steps": 49100,
+   "num_train_epochs": 10,
+   "total_flos": 9.13949176598519e+16,
+   "trial_name": null,
+   "trial_params": null
+ }
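
The state above is the bookkeeping that the Hugging Face Transformers Trainer writes alongside a checkpoint, here at global step 9820 (epoch 2 of a planned 10). As a minimal sketch of how the logged loss curve can be read back out of the uploaded file, assuming trainer_state.json has been downloaded locally and using only the Python standard library and the field names visible in the diff:

import json

# Load the uploaded state file (assumed to be in the working directory).
with open("trainer_state.json") as f:
    state = json.load(f)

# Overall progress recorded in the state.
print(f"epoch {state['epoch']} / {state['num_train_epochs']}, "
      f"step {state['global_step']} / {state['max_steps']}")

# Each log_history entry holds the running loss and learning rate at a logged step.
for entry in state["log_history"]:
    print(f"step {entry['step']:>5}  epoch {entry['epoch']:.2f}  "
          f"lr {entry['learning_rate']:.3e}  loss {entry['loss']:.2f}")

Run against this file, the printout shows the logged loss falling from about 20966 at step 500 to about 61 at step 9500, under a steadily decreasing learning rate.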