fay-ong committed
Commit 2883832 · verified · 1 Parent(s): 6ed31a9

End of training

Files changed (4)
  1. README.md +3 -2
  2. all_results.json +6 -6
  3. train_results.json +6 -6
  4. trainer_state.json +48 -139
README.md CHANGED
@@ -2,8 +2,9 @@
  license: apache-2.0
  library_name: peft
  tags:
- - unsloth
  - llama-factory
+ - lora
+ - unsloth
  - generated_from_trainer
  base_model: unsloth/llama-3-8b-Instruct-bnb-4bit
  model-index:
@@ -16,7 +17,7 @@ should probably proofread and complete it, then remove this comment. -->

  # llama-3-8b-finetuned

- This model is a fine-tuned version of [unsloth/llama-3-8b-Instruct-bnb-4bit](https://huggingface.co/unsloth/llama-3-8b-Instruct-bnb-4bit) on an unknown dataset.
+ This model is a fine-tuned version of [unsloth/llama-3-8b-Instruct-bnb-4bit](https://huggingface.co/unsloth/llama-3-8b-Instruct-bnb-4bit) on the formatted_data dataset.

  ## Model description
 
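The updated card describes a PEFT/LoRA adapter trained on top of unsloth/llama-3-8b-Instruct-bnb-4bit. A minimal usage sketch, assuming the adapter is published under this repo (the repo id fay-ong/llama-3-8b-finetuned is inferred from the model name and may differ):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "unsloth/llama-3-8b-Instruct-bnb-4bit"  # base model named in the README
adapter_id = "fay-ong/llama-3-8b-finetuned"       # assumed Hub path of this adapter

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, device_map="auto")
model = PeftModel.from_pretrained(base, adapter_id)  # attach the LoRA weights

inputs = tokenizer("Hello!", return_tensors="pt").to(base.device)
out = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```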
all_results.json CHANGED
@@ -1,8 +1,8 @@
  {
- "epoch": 2.0,
- "total_flos": 1.5362943036162048e+16,
- "train_loss": 0.16884834933280946,
- "train_runtime": 1424.4252,
- "train_samples_per_second": 0.702,
- "train_steps_per_second": 0.176
+ "epoch": 1.0,
+ "total_flos": 7074583249158144.0,
+ "train_loss": 0.2772752811908722,
+ "train_runtime": 751.7766,
+ "train_samples_per_second": 0.665,
+ "train_steps_per_second": 0.166
  }
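A quick arithmetic check of the new run numbers (plain Python, nothing from the repo assumed): runtime multiplied by the reported throughputs should reproduce the step and sample counts of the single epoch.

```python
runtime = 751.7766        # train_runtime, seconds
steps_per_s = 0.166       # train_steps_per_second
samples_per_s = 0.665     # train_samples_per_second

print(round(runtime * steps_per_s))    # ~125 optimizer steps, matching max_steps below
print(round(runtime * samples_per_s))  # ~500 samples processed in the one epoch
```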
train_results.json CHANGED
@@ -1,8 +1,8 @@
  {
- "epoch": 2.0,
- "total_flos": 1.5362943036162048e+16,
- "train_loss": 0.16884834933280946,
- "train_runtime": 1424.4252,
- "train_samples_per_second": 0.702,
- "train_steps_per_second": 0.176
+ "epoch": 1.0,
+ "total_flos": 7074583249158144.0,
+ "train_loss": 0.2772752811908722,
+ "train_runtime": 751.7766,
+ "train_samples_per_second": 0.665,
+ "train_steps_per_second": 0.166
  }
trainer_state.json CHANGED
@@ -1,202 +1,111 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 2.0,
+ "epoch": 1.0,
  "eval_steps": 500,
- "global_step": 250,
+ "global_step": 125,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
  {
  "epoch": 0.08,
- "grad_norm": 10.01574420928955,
- "learning_rate": 6.4000000000000006e-06,
- "loss": 1.545,
+ "grad_norm": 7.301303386688232,
+ "learning_rate": 1.3846153846153847e-05,
+ "loss": 1.5694,
  "step": 10
  },
  {
  "epoch": 0.16,
- "grad_norm": 5.001451015472412,
- "learning_rate": 1.4400000000000001e-05,
- "loss": 0.6012,
+ "grad_norm": 3.652470827102661,
+ "learning_rate": 1.985871018518236e-05,
+ "loss": 0.3644,
  "step": 20
  },
  {
  "epoch": 0.24,
- "grad_norm": 2.802138566970825,
- "learning_rate": 1.9991228300988586e-05,
- "loss": 0.3138,
+ "grad_norm": 5.326970100402832,
+ "learning_rate": 1.912783265061319e-05,
+ "loss": 0.374,
  "step": 30
  },
  {
  "epoch": 0.32,
- "grad_norm": 5.7909345626831055,
- "learning_rate": 1.983571470813386e-05,
- "loss": 0.2683,
+ "grad_norm": 0.5815876126289368,
+ "learning_rate": 1.7640373758216075e-05,
+ "loss": 0.1797,
  "step": 40
  },
  {
  "epoch": 0.4,
- "grad_norm": 2.0869674682617188,
- "learning_rate": 1.9488760116444966e-05,
- "loss": 0.0889,
+ "grad_norm": 1.439568281173706,
+ "learning_rate": 1.5555702330196024e-05,
+ "loss": 0.0685,
  "step": 50
  },
  {
  "epoch": 0.48,
- "grad_norm": 0.8485150337219238,
- "learning_rate": 1.895711760239413e-05,
- "loss": 0.202,
+ "grad_norm": 0.09065556526184082,
+ "learning_rate": 1.3036767451096148e-05,
+ "loss": 0.1244,
  "step": 60
  },
  {
  "epoch": 0.56,
- "grad_norm": 2.812760353088379,
- "learning_rate": 1.8251134982782952e-05,
- "loss": 0.1592,
+ "grad_norm": 2.369553327560425,
+ "learning_rate": 1.028046256275869e-05,
+ "loss": 0.1301,
  "step": 70
  },
  {
  "epoch": 0.64,
- "grad_norm": 7.189540863037109,
- "learning_rate": 1.7384553406258842e-05,
- "loss": 0.1,
+ "grad_norm": 2.2806155681610107,
+ "learning_rate": 7.50223521832773e-06,
+ "loss": 0.2033,
  "step": 80
  },
  {
  "epoch": 0.72,
- "grad_norm": 2.625497579574585,
- "learning_rate": 1.63742398974869e-05,
- "loss": 0.1889,
+ "grad_norm": 1.1145380735397339,
+ "learning_rate": 4.919246547534709e-06,
+ "loss": 0.1496,
  "step": 90
  },
  {
  "epoch": 0.8,
- "grad_norm": 0.10543157905340195,
- "learning_rate": 1.5239859059700794e-05,
- "loss": 0.1133,
+ "grad_norm": 1.7836447954177856,
+ "learning_rate": 2.7333967796597317e-06,
+ "loss": 0.0447,
  "step": 100
  },
  {
  "epoch": 0.88,
- "grad_norm": 4.1977386474609375,
- "learning_rate": 1.4003490325568953e-05,
- "loss": 0.0466,
+ "grad_norm": 0.2162286639213562,
+ "learning_rate": 1.1155436402112785e-06,
+ "loss": 0.1065,
  "step": 110
  },
  {
  "epoch": 0.96,
- "grad_norm": 0.047037750482559204,
- "learning_rate": 1.2689198206152657e-05,
- "loss": 0.1327,
+ "grad_norm": 1.168771505355835,
+ "learning_rate": 1.921471959676957e-07,
+ "loss": 0.1084,
  "step": 120
  },
  {
- "epoch": 1.04,
- "grad_norm": 1.9393171072006226,
- "learning_rate": 1.1322563902571227e-05,
- "loss": 0.0181,
- "step": 130
- },
- {
- "epoch": 1.12,
- "grad_norm": 3.4030303955078125,
- "learning_rate": 9.930187397020385e-06,
- "loss": 0.0681,
- "step": 140
- },
- {
- "epoch": 1.2,
- "grad_norm": 0.11634967476129532,
- "learning_rate": 8.539169714375885e-06,
- "loss": 0.0355,
- "step": 150
- },
- {
- "epoch": 1.28,
- "grad_norm": 1.0528508424758911,
- "learning_rate": 7.176585431571235e-06,
- "loss": 0.1156,
- "step": 160
- },
- {
- "epoch": 1.3599999999999999,
- "grad_norm": 0.18656061589717865,
- "learning_rate": 5.868955701754584e-06,
- "loss": 0.0125,
- "step": 170
- },
- {
- "epoch": 1.44,
- "grad_norm": 0.2206106185913086,
- "learning_rate": 4.641732050210032e-06,
- "loss": 0.0465,
- "step": 180
- },
- {
- "epoch": 1.52,
- "grad_norm": 0.03330332040786743,
- "learning_rate": 3.5188009893686916e-06,
- "loss": 0.0085,
- "step": 190
- },
- {
- "epoch": 1.6,
- "grad_norm": 0.2540493309497833,
- "learning_rate": 2.522019095014683e-06,
- "loss": 0.0382,
- "step": 200
- },
- {
- "epoch": 1.6800000000000002,
- "grad_norm": 0.01842614635825157,
- "learning_rate": 1.6707875928990059e-06,
- "loss": 0.0105,
- "step": 210
- },
- {
- "epoch": 1.76,
- "grad_norm": 1.4763164520263672,
- "learning_rate": 9.816747359488632e-07,
- "loss": 0.0363,
- "step": 220
- },
- {
- "epoch": 1.8399999999999999,
- "grad_norm": 0.038556963205337524,
- "learning_rate": 4.6809332207053083e-07,
- "loss": 0.0317,
- "step": 230
- },
- {
- "epoch": 1.92,
- "grad_norm": 6.009864330291748,
- "learning_rate": 1.400396292949513e-07,
- "loss": 0.0262,
- "step": 240
- },
- {
- "epoch": 2.0,
- "grad_norm": 0.019221752882003784,
- "learning_rate": 3.898849596456477e-09,
- "loss": 0.0135,
- "step": 250
- },
- {
- "epoch": 2.0,
- "step": 250,
- "total_flos": 1.5362943036162048e+16,
- "train_loss": 0.16884834933280946,
- "train_runtime": 1424.4252,
- "train_samples_per_second": 0.702,
- "train_steps_per_second": 0.176
+ "epoch": 1.0,
+ "step": 125,
+ "total_flos": 7074583249158144.0,
+ "train_loss": 0.2772752811908722,
+ "train_runtime": 751.7766,
+ "train_samples_per_second": 0.665,
+ "train_steps_per_second": 0.166
  }
  ],
  "logging_steps": 10,
- "max_steps": 250,
+ "max_steps": 125,
  "num_input_tokens_seen": 0,
- "num_train_epochs": 2,
+ "num_train_epochs": 1,
  "save_steps": 1000,
  "stateful_callbacks": {
  "TrainerControl": {
@@ -210,7 +119,7 @@
  "attributes": {}
  }
  },
- "total_flos": 1.5362943036162048e+16,
+ "total_flos": 7074583249158144.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null