fay-ong committed on
Commit 8ea8288
1 Parent(s): 2850c02

End of training

Files changed (4)
  1. README.md +3 -2
  2. all_results.json +5 -5
  3. train_results.json +5 -5
  4. trainer_state.json +80 -80
README.md CHANGED
@@ -2,8 +2,9 @@
  license: apache-2.0
  library_name: peft
  tags:
- - unsloth
  - llama-factory
+ - lora
+ - unsloth
  - generated_from_trainer
  base_model: unsloth/llama-3-8b-Instruct-bnb-4bit
  model-index:
@@ -16,7 +17,7 @@ should probably proofread and complete it, then remove this comment. -->
 
  # llama-3-8b-finetuned
 
- This model is a fine-tuned version of [unsloth/llama-3-8b-Instruct-bnb-4bit](https://huggingface.co/unsloth/llama-3-8b-Instruct-bnb-4bit) on an unknown dataset.
+ This model is a fine-tuned version of [unsloth/llama-3-8b-Instruct-bnb-4bit](https://huggingface.co/unsloth/llama-3-8b-Instruct-bnb-4bit) on the formatted_data dataset.
 
  ## Model description
 
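For context, a minimal sketch of how the resulting adapter could be loaded on top of its base model with peft and transformers. The hub id "fay-ong/llama-3-8b-finetuned" is assumed from the committer and the model name in the card; it is not stated in this diff.

# Minimal sketch (not part of this commit): load the LoRA adapter on top of
# its 4-bit base model. The adapter repo id below is an assumption.
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "unsloth/llama-3-8b-Instruct-bnb-4bit"
adapter_id = "fay-ong/llama-3-8b-finetuned"  # assumed hub id

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, device_map="auto")
model = PeftModel.from_pretrained(base, adapter_id)  # attaches the LoRA weights

inputs = tokenizer("Hello, how are you?", return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=32)[0], skip_special_tokens=True))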
all_results.json CHANGED
@@ -1,8 +1,8 @@
  {
      "epoch": 2.0,
-     "total_flos": 3.846009907091866e+16,
-     "train_loss": 0.16032438999414445,
-     "train_runtime": 2680.6813,
-     "train_samples_per_second": 0.373,
-     "train_steps_per_second": 0.093
+     "total_flos": 1.4149166716420096e+16,
+     "train_loss": 0.18670074969530107,
+     "train_runtime": 1345.2529,
+     "train_samples_per_second": 0.743,
+     "train_steps_per_second": 0.186
  }
train_results.json CHANGED
@@ -1,8 +1,8 @@
  {
      "epoch": 2.0,
-     "total_flos": 3.846009907091866e+16,
-     "train_loss": 0.16032438999414445,
-     "train_runtime": 2680.6813,
-     "train_samples_per_second": 0.373,
-     "train_steps_per_second": 0.093
+     "total_flos": 1.4149166716420096e+16,
+     "train_loss": 0.18670074969530107,
+     "train_runtime": 1345.2529,
+     "train_samples_per_second": 0.743,
+     "train_steps_per_second": 0.186
  }
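The updated throughput figures (identical in all_results.json and train_results.json) are internally consistent; a quick check below, with the per-epoch sample count and effective batch size inferred rather than stated anywhere in this commit.

# Sanity check of the new figures (values copied from the diff above; the
# ~500 examples per epoch and ~4 samples per optimizer step are inferred).
runtime, steps, epochs = 1345.2529, 250, 2.0
print(steps / runtime)            # ~0.186 -> train_steps_per_second
print(0.743 * runtime / epochs)   # ~500 examples seen per epoch
print(0.743 / 0.186)              # ~4 samples per optimizer step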
trainer_state.json CHANGED
@@ -10,187 +10,187 @@
  "log_history": [
  {
  "epoch": 0.08,
- "grad_norm": 6.857266902923584,
+ "grad_norm": 6.015913963317871,
  "learning_rate": 7.2000000000000005e-06,
- "loss": 1.585,
+ "loss": 1.8804,
  "step": 10
  },
  {
  "epoch": 0.16,
- "grad_norm": 2.489650011062622,
- "learning_rate": 1.5200000000000002e-05,
- "loss": 0.268,
+ "grad_norm": 2.7702841758728027,
+ "learning_rate": 1.4400000000000001e-05,
+ "loss": 0.469,
  "step": 20
  },
  {
  "epoch": 0.24,
- "grad_norm": 6.398056507110596,
- "learning_rate": 1.9984407641819812e-05,
- "loss": 0.2895,
+ "grad_norm": 6.33322811126709,
+ "learning_rate": 1.9991228300988586e-05,
+ "loss": 0.3463,
  "step": 30
  },
  {
  "epoch": 0.32,
- "grad_norm": 0.1504039317369461,
- "learning_rate": 1.9809551553491918e-05,
- "loss": 0.1859,
+ "grad_norm": 2.0083963871002197,
+ "learning_rate": 1.983571470813386e-05,
+ "loss": 0.1673,
  "step": 40
  },
  {
  "epoch": 0.4,
- "grad_norm": 1.2296534776687622,
- "learning_rate": 1.944376370237481e-05,
- "loss": 0.0984,
+ "grad_norm": 1.2964335680007935,
+ "learning_rate": 1.9488760116444966e-05,
+ "loss": 0.106,
  "step": 50
  },
  {
  "epoch": 0.48,
- "grad_norm": 0.012895800173282623,
- "learning_rate": 1.889416373291298e-05,
- "loss": 0.0668,
+ "grad_norm": 0.03695794567465782,
+ "learning_rate": 1.895711760239413e-05,
+ "loss": 0.1623,
  "step": 60
  },
  {
  "epoch": 0.56,
- "grad_norm": 2.1277153491973877,
- "learning_rate": 1.8171448983351284e-05,
- "loss": 0.1228,
+ "grad_norm": 2.0371510982513428,
+ "learning_rate": 1.8251134982782952e-05,
+ "loss": 0.1585,
  "step": 70
  },
  {
  "epoch": 0.64,
- "grad_norm": 1.6015583276748657,
- "learning_rate": 1.7289686274214116e-05,
- "loss": 0.193,
+ "grad_norm": 2.199394702911377,
+ "learning_rate": 1.7384553406258842e-05,
+ "loss": 0.192,
  "step": 80
  },
  {
  "epoch": 0.72,
- "grad_norm": 0.42995280027389526,
- "learning_rate": 1.6266038113644605e-05,
- "loss": 0.1573,
+ "grad_norm": 1.681180477142334,
+ "learning_rate": 1.63742398974869e-05,
+ "loss": 0.1705,
  "step": 90
  },
  {
  "epoch": 0.8,
- "grad_norm": 0.2124607264995575,
- "learning_rate": 1.5120428648705716e-05,
- "loss": 0.0905,
+ "grad_norm": 0.6913540959358215,
+ "learning_rate": 1.5239859059700794e-05,
+ "loss": 0.0652,
  "step": 100
  },
  {
  "epoch": 0.88,
- "grad_norm": 0.20811651647090912,
- "learning_rate": 1.3875155864521031e-05,
- "loss": 0.1169,
+ "grad_norm": 0.09698222577571869,
+ "learning_rate": 1.4003490325568953e-05,
+ "loss": 0.1237,
  "step": 110
  },
  {
  "epoch": 0.96,
- "grad_norm": 0.7650100588798523,
- "learning_rate": 1.2554457579357906e-05,
- "loss": 0.125,
+ "grad_norm": 0.565293550491333,
+ "learning_rate": 1.2689198206152657e-05,
+ "loss": 0.1669,
  "step": 120
  },
  {
  "epoch": 1.04,
- "grad_norm": 0.23556451499462128,
- "learning_rate": 1.1184039683065014e-05,
- "loss": 0.0576,
+ "grad_norm": 0.28498321771621704,
+ "learning_rate": 1.1322563902571227e-05,
+ "loss": 0.0606,
  "step": 130
  },
  {
  "epoch": 1.12,
- "grad_norm": 0.6805216073989868,
- "learning_rate": 9.790575801166432e-06,
- "loss": 0.0703,
+ "grad_norm": 0.46145015954971313,
+ "learning_rate": 9.930187397020385e-06,
+ "loss": 0.1064,
  "step": 140
  },
  {
  "epoch": 1.2,
- "grad_norm": 0.023868851363658905,
- "learning_rate": 8.401188123081653e-06,
- "loss": 0.049,
+ "grad_norm": 0.04056666046380997,
+ "learning_rate": 8.539169714375885e-06,
+ "loss": 0.0548,
  "step": 150
  },
  {
  "epoch": 1.28,
- "grad_norm": 1.3114885091781616,
- "learning_rate": 7.042919499559538e-06,
- "loss": 0.129,
+ "grad_norm": 1.3497525453567505,
+ "learning_rate": 7.176585431571235e-06,
+ "loss": 0.1326,
  "step": 160
  },
  {
  "epoch": 1.3599999999999999,
- "grad_norm": 0.2610754668712616,
- "learning_rate": 5.742207084349274e-06,
- "loss": 0.0652,
+ "grad_norm": 0.316214382648468,
+ "learning_rate": 5.868955701754584e-06,
+ "loss": 0.0843,
  "step": 170
  },
  {
  "epoch": 1.44,
- "grad_norm": 2.7140579223632812,
- "learning_rate": 4.524367765074499e-06,
- "loss": 0.0147,
+ "grad_norm": 1.4990209341049194,
+ "learning_rate": 4.641732050210032e-06,
+ "loss": 0.0121,
  "step": 180
  },
  {
  "epoch": 1.52,
- "grad_norm": 1.237959384918213,
- "learning_rate": 3.4131053988131947e-06,
- "loss": 0.0694,
+ "grad_norm": 0.16705955564975739,
+ "learning_rate": 3.5188009893686916e-06,
+ "loss": 0.0227,
  "step": 190
  },
  {
  "epoch": 1.6,
- "grad_norm": 0.09863968938589096,
- "learning_rate": 2.4300494434824373e-06,
- "loss": 0.0786,
+ "grad_norm": 0.6158097982406616,
+ "learning_rate": 2.522019095014683e-06,
+ "loss": 0.0385,
  "step": 200
  },
  {
  "epoch": 1.6800000000000002,
- "grad_norm": 0.009426446631550789,
- "learning_rate": 1.5943339650431578e-06,
- "loss": 0.0301,
+ "grad_norm": 0.018577594310045242,
+ "learning_rate": 1.6707875928990059e-06,
+ "loss": 0.0295,
  "step": 210
  },
  {
  "epoch": 1.76,
- "grad_norm": 1.9068654775619507,
- "learning_rate": 9.222252146709143e-07,
- "loss": 0.025,
+ "grad_norm": 2.720700740814209,
+ "learning_rate": 9.816747359488632e-07,
+ "loss": 0.0206,
  "step": 220
  },
  {
  "epoch": 1.8399999999999999,
- "grad_norm": 0.03250988572835922,
- "learning_rate": 4.268050246793276e-07,
- "loss": 0.0093,
+ "grad_norm": 0.03381221741437912,
+ "learning_rate": 4.6809332207053083e-07,
+ "loss": 0.0089,
  "step": 230
  },
  {
  "epoch": 1.92,
- "grad_norm": 0.29542025923728943,
- "learning_rate": 1.1771618553447217e-07,
- "loss": 0.0756,
+ "grad_norm": 0.07453195750713348,
+ "learning_rate": 1.400396292949513e-07,
+ "loss": 0.0576,
  "step": 240
  },
  {
  "epoch": 2.0,
- "grad_norm": 0.12926587462425232,
- "learning_rate": 9.74759906957612e-10,
- "loss": 0.0351,
+ "grad_norm": 0.23894575238227844,
+ "learning_rate": 3.898849596456477e-09,
+ "loss": 0.0308,
  "step": 250
  },
  {
  "epoch": 2.0,
  "step": 250,
- "total_flos": 3.846009907091866e+16,
- "train_loss": 0.16032438999414445,
- "train_runtime": 2680.6813,
- "train_samples_per_second": 0.373,
- "train_steps_per_second": 0.093
+ "total_flos": 1.4149166716420096e+16,
+ "train_loss": 0.18670074969530107,
+ "train_runtime": 1345.2529,
+ "train_samples_per_second": 0.743,
+ "train_steps_per_second": 0.186
  }
  ],
  "logging_steps": 10,
@@ -210,7 +210,7 @@
  "attributes": {}
  }
  },
- "total_flos": 3.846009907091866e+16,
+ "total_flos": 1.4149166716420096e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null