Model save
README.md CHANGED
@@ -20,7 +20,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [google/gemma-7b](https://huggingface.co/google/gemma-7b) on the generator dataset.
 It achieves the following results on the evaluation set:
-- Loss:
+- Loss: 149.4047
 
 ## Model description
 
@@ -39,7 +39,7 @@ More information needed
 ### Training hyperparameters
 
 The following hyperparameters were used during training:
-- learning_rate: 0.
+- learning_rate: 0.03
 - train_batch_size: 8
 - eval_batch_size: 8
 - seed: 42
@@ -49,15 +49,15 @@ The following hyperparameters were used during training:
 - total_train_batch_size: 128
 - total_eval_batch_size: 64
 - optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
-- lr_scheduler_type:
-- lr_scheduler_warmup_ratio: 0.
+- lr_scheduler_type: linear
+- lr_scheduler_warmup_ratio: 0.06
 - num_epochs: 1
 
 ### Training results
 
 | Training Loss | Epoch | Step | Validation Loss |
 |:-------------:|:-----:|:----:|:---------------:|
-
+| 149.2635 | 1.0 | 140 | 149.4047 |
 
 
 ### Framework versions
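The hyperparameter list in the README diff above corresponds closely to a `transformers.TrainingArguments` configuration. The following is a minimal sketch, not the original training script: the per-device batch size of 8 together with a total train batch size of 128 and a total eval batch size of 64 is consistent with 8 data-parallel workers and 2 gradient-accumulation steps, which is assumed here, and `output_dir` is a hypothetical placeholder.

```python
from transformers import TrainingArguments

# Sketch of a TrainingArguments setup matching the values in the model card.
# Assumption: 8 workers x per_device_train_batch_size 8 x gradient_accumulation 2
# gives the reported total_train_batch_size of 128 (and 8 x 8 = 64 for eval).
args = TrainingArguments(
    output_dir="gemma-7b-generator-sft",   # hypothetical output directory
    learning_rate=0.03,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=2,          # assumed split of the 128 / 8 factor
    seed=42,
    optim="adamw_torch",                    # OptimizerNames.ADAMW_TORCH
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-08,
    lr_scheduler_type="linear",
    warmup_ratio=0.06,
    num_train_epochs=1,
    logging_steps=5,                        # matches trainer_state.json below
)
```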
all_results.json CHANGED
@@ -1,14 +1,9 @@
 {
     "epoch": 1.0,
-    "
-    "
-    "
-    "eval_samples_per_second": 46.154,
-    "eval_steps_per_second": 0.778,
-    "total_flos": 4.268849030789857e+17,
-    "train_loss": 5.964417205538068,
-    "train_runtime": 1743.5737,
+    "total_flos": 4.2812236905630925e+17,
+    "train_loss": 249.82138957977295,
+    "train_runtime": 1603.7748,
     "train_samples": 51241,
-    "train_samples_per_second":
-    "train_steps_per_second": 0.
+    "train_samples_per_second": 11.164,
+    "train_steps_per_second": 0.087
 }
runs/Nov16_02-36-23_main-lora-gemma7b-alpaca-0-0/events.out.tfevents.1731743352.main-lora-gemma7b-alpaca-0-0.457.0 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:7805526aaa887216e22864f882526d821db047e25fa9a2861f759fd3db351cc2
+size 13298
train_results.json CHANGED
@@ -1,9 +1,9 @@
 {
     "epoch": 1.0,
-    "total_flos": 4.
-    "train_loss":
-    "train_runtime":
+    "total_flos": 4.2812236905630925e+17,
+    "train_loss": 249.82138957977295,
+    "train_runtime": 1603.7748,
     "train_samples": 51241,
-    "train_samples_per_second":
-    "train_steps_per_second": 0.
+    "train_samples_per_second": 11.164,
+    "train_steps_per_second": 0.087
 }
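The throughput figures written to train_results.json (and duplicated in all_results.json) are internally consistent with the 140 optimizer steps and the total train batch size of 128 reported elsewhere in this commit; a quick check in Python:

```python
# Consistency check of the reported training throughput
# (values copied from train_results.json in this commit).
train_runtime = 1603.7748        # seconds
samples_per_second = 11.164
steps_per_second = 0.087

print(samples_per_second / steps_per_second)  # ~128 samples per optimizer step
print(train_runtime * steps_per_second)       # ~140 optimizer steps in one epoch
```

The implied ~17.9k samples processed in the epoch (1603.77 s x 11.164/s) is well below `train_samples` = 51241, consistent with the generator dataset packing examples into fixed-length sequences before training, though the exact preprocessing is not recorded in this commit.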
trainer_state.json CHANGED
@@ -10,223 +10,223 @@
   "log_history": [
     {
       "epoch": 0.007142857142857143,
-      "grad_norm":
-      "learning_rate":
+      "grad_norm": 286.327392578125,
+      "learning_rate": 0.003333333333333333,
       "loss": 27.4831,
       "step": 1
     },
     {
       "epoch": 0.03571428571428571,
-      "grad_norm":
-      "learning_rate":
-      "loss":
+      "grad_norm": 564.7811889648438,
+      "learning_rate": 0.016666666666666666,
+      "loss": 180.9505,
       "step": 5
     },
     {
       "epoch": 0.07142857142857142,
-      "grad_norm":
-      "learning_rate": 0.
-      "loss":
+      "grad_norm": 45641.8671875,
+      "learning_rate": 0.029770992366412213,
+      "loss": 193.5063,
       "step": 10
     },
     {
       "epoch": 0.10714285714285714,
-      "grad_norm":
-      "learning_rate": 0.
-      "loss":
+      "grad_norm": 1771723.5,
+      "learning_rate": 0.02862595419847328,
+      "loss": 376.766,
       "step": 15
     },
     {
       "epoch": 0.14285714285714285,
-      "grad_norm":
-      "learning_rate": 0.
-      "loss":
+      "grad_norm": 286214.5,
+      "learning_rate": 0.02748091603053435,
+      "loss": 569.9451,
       "step": 20
     },
     {
       "epoch": 0.17857142857142858,
-      "grad_norm":
-      "learning_rate": 0.
-      "loss":
+      "grad_norm": 1100.7747802734375,
+      "learning_rate": 0.02633587786259542,
+      "loss": 615.8839,
       "step": 25
     },
     {
       "epoch": 0.21428571428571427,
-      "grad_norm":
-      "learning_rate": 0.
-      "loss":
+      "grad_norm": 169022.0,
+      "learning_rate": 0.025190839694656485,
+      "loss": 481.0353,
       "step": 30
     },
     {
       "epoch": 0.25,
-      "grad_norm":
-      "learning_rate": 0.
-      "loss":
+      "grad_norm": 333.40972900390625,
+      "learning_rate": 0.024045801526717557,
+      "loss": 305.1498,
       "step": 35
     },
     {
       "epoch": 0.2857142857142857,
-      "grad_norm":
-      "learning_rate": 0.
-      "loss":
+      "grad_norm": 139948.15625,
+      "learning_rate": 0.022900763358778626,
+      "loss": 241.1332,
       "step": 40
     },
     {
       "epoch": 0.32142857142857145,
-      "grad_norm":
-      "learning_rate": 0.
-      "loss":
+      "grad_norm": 1393742.875,
+      "learning_rate": 0.021755725190839695,
+      "loss": 207.3882,
       "step": 45
     },
     {
       "epoch": 0.35714285714285715,
-      "grad_norm":
-      "learning_rate": 0.
-      "loss":
+      "grad_norm": 12597.32421875,
+      "learning_rate": 0.020610687022900764,
+      "loss": 201.0974,
       "step": 50
     },
     {
       "epoch": 0.39285714285714285,
-      "grad_norm":
-      "learning_rate": 0.
-      "loss":
+      "grad_norm": 1081976.875,
+      "learning_rate": 0.01946564885496183,
+      "loss": 222.2186,
       "step": 55
     },
     {
       "epoch": 0.42857142857142855,
-      "grad_norm":
-      "learning_rate": 0.
-      "loss":
+      "grad_norm": 18104.56640625,
+      "learning_rate": 0.0183206106870229,
+      "loss": 254.4615,
       "step": 60
     },
     {
       "epoch": 0.4642857142857143,
-      "grad_norm":
-      "learning_rate": 0.
-      "loss":
+      "grad_norm": 53967.2265625,
+      "learning_rate": 0.017175572519083967,
+      "loss": 256.7784,
       "step": 65
     },
     {
       "epoch": 0.5,
-      "grad_norm":
-      "learning_rate": 0.
-      "loss":
+      "grad_norm": 36778.24609375,
+      "learning_rate": 0.01603053435114504,
+      "loss": 189.0749,
       "step": 70
     },
     {
       "epoch": 0.5357142857142857,
-      "grad_norm":
-      "learning_rate": 0.
-      "loss":
+      "grad_norm": 1759.3746337890625,
+      "learning_rate": 0.014885496183206106,
+      "loss": 198.0302,
       "step": 75
     },
     {
       "epoch": 0.5714285714285714,
-      "grad_norm":
-      "learning_rate":
-      "loss":
+      "grad_norm": 2888.434326171875,
+      "learning_rate": 0.013740458015267175,
+      "loss": 208.9426,
       "step": 80
     },
     {
       "epoch": 0.6071428571428571,
-      "grad_norm":
-      "learning_rate":
-      "loss":
+      "grad_norm": 3183.7255859375,
+      "learning_rate": 0.012595419847328242,
+      "loss": 237.6604,
       "step": 85
     },
     {
       "epoch": 0.6428571428571429,
-      "grad_norm":
-      "learning_rate":
-      "loss":
+      "grad_norm": 295.5591735839844,
+      "learning_rate": 0.011450381679389313,
+      "loss": 259.345,
       "step": 90
     },
     {
       "epoch": 0.6785714285714286,
-      "grad_norm":
-      "learning_rate":
-      "loss":
+      "grad_norm": 2817.36083984375,
+      "learning_rate": 0.010305343511450382,
+      "loss": 229.8393,
       "step": 95
     },
     {
       "epoch": 0.7142857142857143,
-      "grad_norm":
-      "learning_rate":
-      "loss":
+      "grad_norm": 1439.1002197265625,
+      "learning_rate": 0.00916030534351145,
+      "loss": 206.7085,
       "step": 100
     },
     {
       "epoch": 0.75,
-      "grad_norm":
-      "learning_rate":
-      "loss":
+      "grad_norm": 2063.854248046875,
+      "learning_rate": 0.00801526717557252,
+      "loss": 200.2123,
       "step": 105
     },
     {
       "epoch": 0.7857142857142857,
-      "grad_norm":
-      "learning_rate":
-      "loss":
+      "grad_norm": 3193.01953125,
+      "learning_rate": 0.006870229007633588,
+      "loss": 195.9004,
       "step": 110
     },
     {
       "epoch": 0.8214285714285714,
-      "grad_norm":
-      "learning_rate":
-      "loss":
+      "grad_norm": 1513.8978271484375,
+      "learning_rate": 0.0057251908396946565,
+      "loss": 188.8404,
       "step": 115
     },
     {
       "epoch": 0.8571428571428571,
-      "grad_norm":
-      "learning_rate":
-      "loss":
+      "grad_norm": 5677.81494140625,
+      "learning_rate": 0.004580152671755725,
+      "loss": 176.7594,
       "step": 120
     },
     {
       "epoch": 0.8928571428571429,
-      "grad_norm":
-      "learning_rate":
-      "loss":
+      "grad_norm": 726.0751953125,
+      "learning_rate": 0.003435114503816794,
+      "loss": 167.6159,
       "step": 125
     },
     {
       "epoch": 0.9285714285714286,
-      "grad_norm":
-      "learning_rate":
-      "loss":
+      "grad_norm": 1151.048095703125,
+      "learning_rate": 0.0022900763358778627,
+      "loss": 155.8075,
       "step": 130
     },
     {
       "epoch": 0.9642857142857143,
-      "grad_norm":
-      "learning_rate":
-      "loss":
+      "grad_norm": 3488.1396484375,
+      "learning_rate": 0.0011450381679389313,
+      "loss": 155.3781,
       "step": 135
     },
     {
       "epoch": 1.0,
-      "grad_norm":
+      "grad_norm": 718.7391357421875,
       "learning_rate": 0.0,
-      "loss":
+      "loss": 149.2635,
       "step": 140
     },
     {
       "epoch": 1.0,
-      "eval_loss":
-      "eval_runtime":
-      "eval_samples_per_second":
-      "eval_steps_per_second":
+      "eval_loss": 149.4047088623047,
+      "eval_runtime": 2.8984,
+      "eval_samples_per_second": 61.413,
+      "eval_steps_per_second": 1.035,
       "step": 140
     },
     {
       "epoch": 1.0,
       "step": 140,
-      "total_flos": 4.
-      "train_loss":
-      "train_runtime":
-      "train_samples_per_second":
-      "train_steps_per_second": 0.
+      "total_flos": 4.2812236905630925e+17,
+      "train_loss": 249.82138957977295,
+      "train_runtime": 1603.7748,
+      "train_samples_per_second": 11.164,
+      "train_steps_per_second": 0.087
     }
   ],
   "logging_steps": 5,
@@ -246,7 +246,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 4.
+  "total_flos": 4.2812236905630925e+17,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null
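The `learning_rate` values in `log_history` above trace out the linear schedule declared in the model card (peak 0.03, `lr_scheduler_warmup_ratio` 0.06, 140 optimizer steps). A small sketch, assuming the standard transformers linear warmup-then-decay rule with `warmup_steps = ceil(0.06 * 140) = 9`, reproduces the logged values:

```python
import math

# Reproduce the learning rates logged in trainer_state.json, assuming the
# standard linear schedule: ramp up over the warmup steps, then decay
# linearly to zero at the final step.
peak_lr = 0.03
total_steps = 140
warmup_steps = math.ceil(0.06 * total_steps)  # 9

def lr_at(step: int) -> float:
    if step < warmup_steps:
        return peak_lr * step / warmup_steps
    return peak_lr * (total_steps - step) / (total_steps - warmup_steps)

print(lr_at(1))    # 0.00333...   (logged at step 1)
print(lr_at(10))   # 0.0297709... (logged at step 10)
print(lr_at(70))   # 0.0160305... (logged at step 70)
print(lr_at(140))  # 0.0          (logged at step 140)
```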