Model save
- README.md +39 -38
- all_results.json +17 -4
- eval_results.json +16 -0
- model.safetensors +1 -1
- runs/Sep10_04-21-15_xe8545-a100-30/events.out.tfevents.1725948816.xe8545-a100-30.2907630.1 +3 -0
- runs/Sep22_07-59-07_xe8545-a100-05/events.out.tfevents.1726985345.xe8545-a100-05.372129.0 +3 -0
- train_results.json +4 -4
- trainer_state.json +0 -0
- training_args.bin +2 -2
README.md
CHANGED
@@ -3,6 +3,7 @@ library_name: transformers
 tags:
 - trl
 - dpo
+- alignment-handbook
 - generated_from_trainer
 model-index:
 - name: OpenELM-1_1B-DPO-full-max-reward-most-similar
@@ -16,15 +17,15 @@ should probably proofread and complete it, then remove this comment. -->

 This model was trained from scratch on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Loss: 1.
-- Rewards/chosen: -
-- Rewards/rejected: -
-- Rewards/accuracies: 0.
-- Rewards/margins:
-- Logps/rejected: -
-- Logps/chosen: -
-- Logits/rejected:
-- Logits/chosen:
+- Loss: 1.6250
+- Rewards/chosen: -16.375
+- Rewards/rejected: -18.625
+- Rewards/accuracies: 0.6055
+- Rewards/margins: 2.1406
+- Logps/rejected: -2144.0
+- Logps/chosen: -1960.0
+- Logits/rejected: 4.75
+- Logits/chosen: 3.2969

 ## Model description

@@ -61,39 +62,39 @@ The following hyperparameters were used during training:

 | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
 |:-------------:|:------:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
+| 0.5787 | 0.1047 | 100 | 0.6727 | -1.3594 | -1.6328 | 0.6133 | 0.2793 | -452.0 | -454.0 | -10.4375 | -10.875 |
+| 0.5561 | 0.2094 | 200 | 0.7091 | -2.5625 | -3.0625 | 0.6387 | 0.5039 | -596.0 | -576.0 | -3.2969 | -4.3438 |
+| 0.5165 | 0.3141 | 300 | 0.7539 | -4.5938 | -5.1875 | 0.6035 | 0.6016 | -808.0 | -776.0 | -2.8438 | -3.9844 |
+| 0.5008 | 0.4188 | 400 | 0.7994 | -4.7188 | -5.3438 | 0.5918 | 0.6094 | -824.0 | -792.0 | -3.5469 | -4.8438 |
+| 0.505 | 0.5236 | 500 | 0.8047 | -6.2188 | -6.9375 | 0.6016 | 0.7109 | -980.0 | -940.0 | -1.7812 | -3.1406 |
+| 0.5125 | 0.6283 | 600 | 0.8067 | -5.2812 | -5.9375 | 0.6133 | 0.6562 | -884.0 | -848.0 | -1.3281 | -2.6562 |
+| 0.4627 | 0.7330 | 700 | 0.9020 | -4.9375 | -5.9062 | 0.6172 | 0.9648 | -880.0 | -812.0 | -2.1719 | -3.7969 |
+| 0.4583 | 0.8377 | 800 | 0.8516 | -7.3125 | -7.9688 | 0.5488 | 0.6562 | -1088.0 | -1048.0 | -0.8438 | -2.25 |
+| 0.4454 | 0.9424 | 900 | 0.9168 | -6.6875 | -7.5938 | 0.6094 | 0.9297 | -1048.0 | -984.0 | -3.4375 | -5.0 |
+| 0.1656 | 1.0471 | 1000 | 1.0883 | -9.1875 | -10.3125 | 0.6055 | 1.1172 | -1320.0 | -1240.0 | 0.2676 | -1.3984 |
+| 0.1613 | 1.1518 | 1100 | 1.0450 | -9.125 | -10.25 | 0.6152 | 1.1797 | -1312.0 | -1232.0 | 1.8438 | 0.1934 |
+| 0.1616 | 1.2565 | 1200 | 1.0938 | -10.0625 | -11.1875 | 0.5996 | 1.1484 | -1408.0 | -1320.0 | 0.8789 | -0.5898 |
+| 0.1677 | 1.3613 | 1300 | 0.9728 | -7.6562 | -8.9375 | 0.6504 | 1.2422 | -1184.0 | -1088.0 | -0.3652 | -1.9297 |
+| 0.1712 | 1.4660 | 1400 | 1.0911 | -9.8125 | -11.0625 | 0.6016 | 1.2266 | -1392.0 | -1296.0 | -0.3105 | -1.7344 |
+| 0.1723 | 1.5707 | 1500 | 1.0245 | -10.0 | -11.25 | 0.6172 | 1.3047 | -1416.0 | -1312.0 | 0.6992 | -0.7773 |
+| 0.1515 | 1.6754 | 1600 | 1.0037 | -9.0 | -10.3125 | 0.6211 | 1.3359 | -1320.0 | -1216.0 | -0.1299 | -1.7109 |
+| 0.1197 | 1.7801 | 1700 | 1.1304 | -10.75 | -12.125 | 0.5977 | 1.3594 | -1504.0 | -1392.0 | 0.9805 | -0.4961 |
+| 0.1258 | 1.8848 | 1800 | 1.0837 | -10.5 | -11.8125 | 0.6113 | 1.3359 | -1472.0 | -1368.0 | 2.2188 | 0.6797 |
+| 0.118 | 1.9895 | 1900 | 1.0529 | -11.5 | -12.75 | 0.6172 | 1.2578 | -1560.0 | -1464.0 | 3.4062 | 2.0 |
+| 0.03 | 2.0942 | 2000 | 1.4271 | -14.75 | -16.625 | 0.5977 | 1.8516 | -1952.0 | -1792.0 | 4.4375 | 3.0 |
+| 0.0206 | 2.1990 | 2100 | 1.4637 | -14.75 | -16.75 | 0.6230 | 2.0469 | -1968.0 | -1792.0 | 4.8125 | 3.3594 |
+| 0.0301 | 2.3037 | 2200 | 1.5228 | -15.4375 | -17.5 | 0.6152 | 2.0469 | -2040.0 | -1864.0 | 4.625 | 3.1875 |
+| 0.0312 | 2.4084 | 2300 | 1.6112 | -16.375 | -18.5 | 0.6172 | 2.0781 | -2128.0 | -1952.0 | 4.75 | 3.3125 |
+| 0.0379 | 2.5131 | 2400 | 1.6100 | -16.5 | -18.625 | 0.6113 | 2.1094 | -2144.0 | -1968.0 | 4.5938 | 3.1719 |
+| 0.0235 | 2.6178 | 2500 | 1.6536 | -17.0 | -19.125 | 0.6113 | 2.1719 | -2208.0 | -2016.0 | 4.75 | 3.3125 |
+| 0.0198 | 2.7225 | 2600 | 1.6602 | -16.75 | -19.0 | 0.6113 | 2.2031 | -2192.0 | -1992.0 | 4.8125 | 3.3594 |
+| 0.0251 | 2.8272 | 2700 | 1.6070 | -16.25 | -18.375 | 0.6133 | 2.1406 | -2128.0 | -1936.0 | 4.7188 | 3.2656 |
+| 0.0213 | 2.9319 | 2800 | 1.6250 | -16.375 | -18.625 | 0.6055 | 2.1406 | -2144.0 | -1960.0 | 4.75 | 3.2969 |


 ### Framework versions

 - Transformers 4.44.2
 - Pytorch 2.3.0
-- Datasets
+- Datasets 3.0.0
 - Tokenizers 0.19.1
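The `Rewards/*` columns are standard TRL `DPOTrainer` logging, and `Rewards/margins` should equal `Rewards/chosen` minus `Rewards/rejected` up to the rounding applied to logged values. A minimal sketch of the underlying DPO implicit reward follows; `beta` and the reference-model log-probs are hypothetical, picked only so the arithmetic reproduces this card's final eval row:

```python
# DPO implicit reward: r(x, y) = beta * (log pi_theta(y|x) - log pi_ref(y|x))
beta = 0.1  # assumed; the actual value lives in training_args.bin, not this card

def implicit_reward(logp_policy: float, logp_ref: float) -> float:
    """Implicit DPO reward of a completion y given a prompt x."""
    return beta * (logp_policy - logp_ref)

# Logps/chosen and Logps/rejected come from the card; reference log-probs are made up.
r_chosen = implicit_reward(logp_policy=-1960.0, logp_ref=-1796.25)    # -16.375
r_rejected = implicit_reward(logp_policy=-2144.0, logp_ref=-1957.75)  # -18.625

# Prints 2.25 vs. the logged 2.1406; the gap is rounding in the logged values.
print(r_chosen - r_rejected)
```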
all_results.json
CHANGED
@@ -1,9 +1,22 @@
 {
     "epoch": 3.0,
+    "eval_logits/chosen": -16.75,
+    "eval_logits/rejected": -16.5,
+    "eval_logps/chosen": -880.0,
+    "eval_logps/rejected": -900.0,
+    "eval_loss": 1.452078104019165,
+    "eval_rewards/accuracies": 0.521484375,
+    "eval_rewards/chosen": -5.59375,
+    "eval_rewards/margins": 0.5078125,
+    "eval_rewards/rejected": -6.125,
+    "eval_runtime": 46.5435,
+    "eval_samples": 2000,
+    "eval_samples_per_second": 42.971,
+    "eval_steps_per_second": 0.688,
     "total_flos": 0.0,
-    "train_loss": 0.
-    "train_runtime":
+    "train_loss": 0.2317357657987618,
+    "train_runtime": 12117.9535,
     "train_samples": 61119,
-    "train_samples_per_second":
-    "train_steps_per_second": 0.
+    "train_samples_per_second": 15.131,
+    "train_steps_per_second": 0.236
 }
eval_results.json
ADDED
@@ -0,0 +1,16 @@
+{
+    "epoch": 3.0,
+    "eval_logits/chosen": -16.75,
+    "eval_logits/rejected": -16.5,
+    "eval_logps/chosen": -880.0,
+    "eval_logps/rejected": -900.0,
+    "eval_loss": 1.452078104019165,
+    "eval_rewards/accuracies": 0.521484375,
+    "eval_rewards/chosen": -5.59375,
+    "eval_rewards/margins": 0.5078125,
+    "eval_rewards/rejected": -6.125,
+    "eval_runtime": 46.5435,
+    "eval_samples": 2000,
+    "eval_samples_per_second": 42.971,
+    "eval_steps_per_second": 0.688
+}
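Since the metrics are plain JSON, downstream scripts can read them directly; a minimal sketch, assuming a local checkout of the repo:

```python
import json

# Read the evaluation metrics added in this commit.
with open("eval_results.json") as f:
    metrics = json.load(f)

print(metrics["eval_loss"])                # 1.452078104019165
print(metrics["eval_rewards/accuracies"])  # 0.521484375
```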
model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:2ca2a60db1d3bb7d6ded1291b2adf740c9b34a6df1c619d75521cba92fb49ae3
 size 2159808696
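Only the Git LFS pointer changed here: the repo tracks the `oid sha256:` digest and `size` while the ~2.2 GB payload lives in LFS storage. A sketch of verifying a downloaded copy against the new pointer (the local path is an assumption):

```python
import hashlib

# Expected digest copied from the LFS pointer in this commit.
EXPECTED = "2ca2a60db1d3bb7d6ded1291b2adf740c9b34a6df1c619d75521cba92fb49ae3"

sha = hashlib.sha256()
with open("model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)

assert sha.hexdigest() == EXPECTED, "model.safetensors does not match its LFS pointer"
```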
runs/Sep10_04-21-15_xe8545-a100-30/events.out.tfevents.1725948816.xe8545-a100-30.2907630.1
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cb22a0e6ce1da25071e7321586d6a9ff70d08e98ca00062a17993fed05b956b9
+size 828
runs/Sep22_07-59-07_xe8545-a100-05/events.out.tfevents.1726985345.xe8545-a100-05.372129.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f61b2449eb581b6cd7f4beffe6fc1f1d4d1b7fefeb77973b01131558b3203800
+size 225710
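Both added files are TensorBoard event logs (also stored via LFS). To inspect them without launching TensorBoard, one option is the event-accumulator API that ships with the `tensorboard` package; the exact scalar tags are an assumption, since they depend on the trainer's logging configuration:

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

# Point the accumulator at a run directory from this commit and parse its events file.
acc = EventAccumulator("runs/Sep22_07-59-07_xe8545-a100-05")
acc.Reload()

print(acc.Tags()["scalars"])  # e.g. "train/loss"; tag names vary per run
```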
train_results.json
CHANGED
@@ -1,9 +1,9 @@
 {
     "epoch": 3.0,
     "total_flos": 0.0,
-    "train_loss": 0.
-    "train_runtime":
+    "train_loss": 0.2317357657987618,
+    "train_runtime": 12117.9535,
     "train_samples": 61119,
-    "train_samples_per_second":
-    "train_steps_per_second": 0.
+    "train_samples_per_second": 15.131,
+    "train_steps_per_second": 0.236
 }
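The throughput fields are internally consistent, assuming `train_samples_per_second` is computed as samples x epochs / runtime:

```python
# Cross-check of the logged throughput (assumed relation, not taken from Trainer source).
train_samples, epochs, runtime_s = 61119, 3.0, 12117.9535
print(train_samples * epochs / runtime_s)  # ~15.131, matching train_samples_per_second
```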
trainer_state.json
CHANGED
The diff for this file is too large to render.
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:92e905d133a708a0b8020e704cf1d92c75f18455eeb84e040aa36274e00728a8
+size 7672
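`training_args.bin` is, by `Trainer` convention, a pickled `TrainingArguments` object, which is why its size changes between runs. A hedged sketch for inspecting it locally; note that unpickling executes arbitrary code, so only do this for files you trust:

```python
import torch

# weights_only=False is required because this is a full pickle, not a tensor file.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs)
```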