Model save

- README.md +34 -31
- all_results.json +6 -5
- config.json +2 -1
- generation_config.json +1 -1
- model-00001-of-00003.safetensors +1 -1
- model-00002-of-00003.safetensors +1 -1
- model-00003-of-00003.safetensors +1 -1
- tokenizer.json +1 -0
- tokenizer_config.json +2 -1
- train_results.json +6 -5
- trainer_state.json +0 -0
- training_args.bin +2 -2
README.md
CHANGED
@@ -2,31 +2,34 @@
 license: mit
 base_model: HuggingFaceH4/mistral-7b-sft-beta
 tags:
-- …
+- trl
+- dpo
 - generated_from_trainer
-datasets:
-- HuggingFaceH4/hh-rlhf-h4
 model-index:
-- name: …
+- name: zephyr-7b-dpo-full
   results: []
 ---

 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
 should probably proofread and complete it, then remove this comment. -->

-…
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/sanqiang/wdpo/runs/p3jwj9bo)
+# zephyr-7b-dpo-full

-This model is a fine-tuned version of [HuggingFaceH4/mistral-7b-sft-beta](https://huggingface.co/HuggingFaceH4/mistral-7b-sft-beta) on the …
+This model is a fine-tuned version of [HuggingFaceH4/mistral-7b-sft-beta](https://huggingface.co/HuggingFaceH4/mistral-7b-sft-beta) on the None dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.…
-- Rewards/chosen: -2.…
-- Rewards/rejected: -…
-- Rewards/accuracies: 0.…
-- Rewards/margins: 0.…
-- Logps/rejected: -…
-- Logps/chosen: -…
-- Logits/rejected: …
-- Logits/chosen: …
+- Loss: 0.0256
+- Rewards/chosen: -2.0365
+- Rewards/rejected: -2.5297
+- Rewards/accuracies: 0.6950
+- Rewards/margins: 0.4933
+- Logps/rejected: -403.6735
+- Logps/chosen: -347.8913
+- Logits/rejected: -2.1603
+- Logits/chosen: -2.1828
+- Debug/policy Weights: 0.0416
+- Debug/losses: 0.0243
+- Debug/raw Losses: 0.5731

 ## Model description

@@ -61,25 +64,25 @@ The following hyperparameters were used during training:

 ### Training results

-| Training Loss | Epoch …
-|…
-| 0.…
-| 0.…
-| 0.…
-| 0.…
-| 0.…
-| 0.…
-| 0.…
-| 0.…
-| 0.…
-| 0.…
-| 0.…
-| 0.…
+| Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen | Debug/policy Weights | Debug/losses | Debug/raw Losses |
+|:-------------:|:------:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|:--------------------:|:------------:|:----------------:|
+| 0.1731 | 0.0796 | 100 | 0.1627 | -0.1434 | -0.1775 | 0.5961 | 0.0341 | -168.4450 | -158.5804 | -2.7045 | -2.7126 | 0.2376 | 0.1613 | 0.6787 |
+| 0.0637 | 0.1592 | 200 | 0.0668 | -0.9118 | -1.1252 | 0.6455 | 0.2134 | -263.2193 | -235.4248 | -2.4769 | -2.4894 | 0.1048 | 0.0652 | 0.6301 |
+| 0.0398 | 0.2388 | 300 | 0.0421 | -1.5345 | -1.8565 | 0.6446 | 0.3220 | -336.3452 | -297.6896 | -2.4777 | -2.4926 | 0.0656 | 0.0401 | 0.6158 |
+| 0.0268 | 0.3183 | 400 | 0.0274 | -1.9929 | -2.3663 | 0.6437 | 0.3735 | -387.3311 | -343.5292 | -2.2480 | -2.2673 | 0.0425 | 0.0260 | 0.6099 |
+| 0.0286 | 0.3979 | 500 | 0.0340 | -1.8450 | -2.2365 | 0.6539 | 0.3916 | -374.3529 | -328.7424 | -2.3185 | -2.3383 | 0.0541 | 0.0326 | 0.6004 |
+| 0.0304 | 0.4775 | 600 | 0.0296 | -1.9424 | -2.3790 | 0.6735 | 0.4366 | -388.5944 | -338.4821 | -2.1888 | -2.2094 | 0.0468 | 0.0278 | 0.5888 |
+| 0.0289 | 0.5571 | 700 | 0.0279 | -1.9248 | -2.3277 | 0.6828 | 0.4030 | -383.4731 | -336.7225 | -2.2155 | -2.2362 | 0.0447 | 0.0266 | 0.5876 |
+| 0.0235 | 0.6367 | 800 | 0.0245 | -2.0777 | -2.5498 | 0.6884 | 0.4720 | -405.6762 | -352.0160 | -2.1066 | -2.1293 | 0.0392 | 0.0231 | 0.5835 |
+| 0.0333 | 0.7163 | 900 | 0.0342 | -1.7749 | -2.2999 | 0.6856 | 0.5250 | -380.6898 | -321.7296 | -2.1171 | -2.1415 | 0.0554 | 0.0321 | 0.5741 |
+| 0.0233 | 0.7959 | 1000 | 0.0238 | -2.2080 | -2.6970 | 0.6950 | 0.4891 | -420.4027 | -365.0407 | -2.1112 | -2.1340 | 0.0381 | 0.0223 | 0.5775 |
+| 0.0253 | 0.8754 | 1100 | 0.0261 | -2.0131 | -2.5002 | 0.6912 | 0.4871 | -400.7220 | -345.5524 | -2.1743 | -2.1963 | 0.0424 | 0.0247 | 0.5737 |
+| 0.0244 | 0.9550 | 1200 | 0.0256 | -2.0365 | -2.5297 | 0.6950 | 0.4933 | -403.6735 | -347.8913 | -2.1603 | -2.1828 | 0.0416 | 0.0243 | 0.5731 |


 ### Framework versions

-- Transformers 4.…
+- Transformers 4.41.0.dev0
 - Pytorch 2.1.2+cu121
 - Datasets 2.14.6
-- Tokenizers 0.…
+- Tokenizers 0.19.1
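The card above documents a DPO fine-tune of mistral-7b-sft-beta with a chat-style template. A minimal inference sketch under stated assumptions: the repo id `your-namespace/zephyr-7b-dpo-full` is a placeholder, since this commit does not show which namespace the checkpoint lives under.

```python
# Minimal inference sketch; the repo id is a placeholder, not taken from this commit.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "your-namespace/zephyr-7b-dpo-full"  # hypothetical namespace

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(
    repo_id, torch_dtype=torch.bfloat16, device_map="auto"
)

# Build a prompt with the tokenizer's chat template (see tokenizer_config.json below).
messages = [{"role": "user", "content": "Summarize what DPO training does."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

output = model.generate(input_ids, max_new_tokens=256)
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
```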
all_results.json
CHANGED
@@ -1,8 +1,9 @@
 {
-  "epoch": …
-  "…
-  "…
+  "epoch": 0.9996020692399522,
+  "total_flos": 0.0,
+  "train_loss": 0.04845805117374013,
+  "train_runtime": 10274.1816,
   "train_samples": 160800,
-  "train_samples_per_second": …
-  "train_steps_per_second": 0.…
+  "train_samples_per_second": 15.651,
+  "train_steps_per_second": 0.122
 }
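The new throughput fields can be cross-checked against each other; a small sketch of the arithmetic, with values copied from the JSON above:

```python
# Cross-check the reported throughput figures in all_results.json.
train_samples = 160800
train_runtime = 10274.1816  # seconds

print(train_samples / train_runtime)  # ~15.651, matching "train_samples_per_second"
print(0.122 * train_runtime)          # ~1253 optimizer steps over the run, consistent
# with the training-results table above, whose last logged checkpoint is
# step 1200 at epoch ~0.955.
```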
config.json
CHANGED
@@ -3,6 +3,7 @@
   "architectures": [
     "MistralForCausalLM"
   ],
+  "attention_dropout": 0.0,
   "bos_token_id": 1,
   "eos_token_id": 2,
   "hidden_act": "silu",
@@ -19,7 +20,7 @@
   "sliding_window": 4096,
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
-  "transformers_version": "4.…
+  "transformers_version": "4.41.0.dev0",
   "use_cache": false,
   "vocab_size": 32000
 }
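The config diff serializes an explicit `attention_dropout` field and records the dev build of Transformers that produced it. A quick inspection sketch, again with a placeholder repo id:

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("your-namespace/zephyr-7b-dpo-full")  # hypothetical repo id
print(config.attention_dropout)     # 0.0, the field added in this commit
print(config.transformers_version)  # "4.41.0.dev0"
print(config.use_cache)             # False in this training checkpoint
```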
generation_config.json
CHANGED
@@ -2,5 +2,5 @@
   "_from_model_config": true,
   "bos_token_id": 1,
   "eos_token_id": 2,
-  "transformers_version": "4.…
+  "transformers_version": "4.41.0.dev0"
 }
model-00001-of-00003.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:…
+oid sha256:784f4cda7f164811e7c3bff436b7a581ee244805e7990bba245355b8652049aa
 size 4943162336
model-00002-of-00003.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:…
+oid sha256:cad95a625583fe4eff948eda5f56c518a6068f3bcb95042122cef3b9788487c3
 size 4999819336
model-00003-of-00003.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:…
+oid sha256:51b810878e8730342266d454af4f87ae1547c48ba92cbc706ff45c4962cf89f3
 size 4540516344
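Each `.safetensors` entry above is a git-lfs pointer file: a spec version line, the SHA-256 (`oid`) of the actual shard, and its size in bytes. A verification sketch for a locally downloaded shard, using the pointer fields from this commit:

```python
# Check a downloaded shard against its git-lfs pointer (oid + size).
import hashlib
import os

path = "model-00003-of-00003.safetensors"
expected_oid = "51b810878e8730342266d454af4f87ae1547c48ba92cbc706ff45c4962cf89f3"
expected_size = 4540516344

assert os.path.getsize(path) == expected_size, "size mismatch"

digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        digest.update(chunk)

assert digest.hexdigest() == expected_oid, "oid mismatch"
print("shard matches its LFS pointer")
```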
tokenizer.json
CHANGED
@@ -134,6 +134,7 @@
   "end_of_word_suffix": null,
   "fuse_unk": true,
   "byte_fallback": true,
+  "ignore_merges": false,
   "vocab": {
     "<unk>": 0,
     "<s>": 1,
tokenizer_config.json
CHANGED
@@ -1,4 +1,6 @@
 {
+  "add_bos_token": true,
+  "add_eos_token": false,
   "added_tokens_decoder": {
     "0": {
       "content": "<unk>",
@@ -34,7 +36,6 @@
   "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
   "clean_up_tokenization_spaces": false,
   "eos_token": "</s>",
-  "legacy": true,
   "model_max_length": 2048,
   "pad_token": "</s>",
   "sp_model_kwargs": {},
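The `chat_template` kept unchanged in this file wraps each turn in `<|system|>` / `<|user|>` / `<|assistant|>` markers, each closed by the EOS token, and appends a bare `<|assistant|>` when a generation prompt is requested. A sketch of what it renders to (placeholder repo id; output shown approximately):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("your-namespace/zephyr-7b-dpo-full")  # hypothetical repo id

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(text)
# Roughly:
# <|system|>
# You are a helpful assistant.</s>
# <|user|>
# Hello!</s>
# <|assistant|>
```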
train_results.json
CHANGED
@@ -1,8 +1,9 @@
 {
-  "epoch": …
-  "…
-  "…
+  "epoch": 0.9996020692399522,
+  "total_flos": 0.0,
+  "train_loss": 0.04845805117374013,
+  "train_runtime": 10274.1816,
   "train_samples": 160800,
-  "train_samples_per_second": …
-  "train_steps_per_second": 0.…
+  "train_samples_per_second": 15.651,
+  "train_steps_per_second": 0.122
 }
trainer_state.json
CHANGED
The diff for this file is too large to render; see the raw file.
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:…
-size …
+oid sha256:a78f94b93b086e56c1a4c9d34b2979b13b6f65cc770836bb1f02ce1951408ca9
+size 6456