amazingvince committed on
Commit 68e0893
1 Parent(s): 097d20b

Model save

README.md ADDED
@@ -0,0 +1,107 @@
+ ---
+ license: apache-2.0
+ base_model: amazingvince/zephyr-220m-sft-full
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: zephyr-220m-dpo-full
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # zephyr-220m-dpo-full
+
+ This model is a fine-tuned version of [amazingvince/zephyr-220m-sft-full](https://huggingface.co/amazingvince/zephyr-220m-sft-full) on an unspecified dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.5608
+ - Rewards/chosen: 0.4691
+ - Rewards/rejected: -0.0455
+ - Rewards/accuracies: 0.6930
+ - Rewards/margins: 0.5145
+ - Logps/rejected: -438.4595
+ - Logps/chosen: -544.6858
+ - Logits/rejected: -4.0092
+ - Logits/chosen: -3.9839
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-07
+ - train_batch_size: 8
+ - eval_batch_size: 4
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 2
+ - total_train_batch_size: 16
+ - total_eval_batch_size: 8
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 1
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
+ |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
+ | 0.6906 | 0.03 | 100 | 0.6932 | 0.0008 | 0.0007 | 0.4860 | 0.0002 | -437.9984 | -549.3683 | -4.0893 | -4.0515 |
+ | 0.6844 | 0.05 | 200 | 0.6855 | 0.0323 | 0.0173 | 0.5640 | 0.0150 | -437.8319 | -549.0540 | -4.0871 | -4.0501 |
+ | 0.6685 | 0.08 | 300 | 0.6675 | 0.1075 | 0.0537 | 0.6160 | 0.0538 | -437.4682 | -548.3016 | -4.0788 | -4.0432 |
+ | 0.6579 | 0.1 | 400 | 0.6426 | 0.2153 | 0.0941 | 0.6430 | 0.1212 | -437.0637 | -547.2234 | -4.0645 | -4.0309 |
+ | 0.6331 | 0.13 | 500 | 0.6241 | 0.2980 | 0.1106 | 0.6430 | 0.1874 | -436.8989 | -546.3970 | -4.0525 | -4.0221 |
+ | 0.6229 | 0.15 | 600 | 0.6138 | 0.3428 | 0.1103 | 0.6580 | 0.2325 | -436.9023 | -545.9487 | -4.0402 | -4.0116 |
+ | 0.6008 | 0.18 | 700 | 0.6053 | 0.3822 | 0.0970 | 0.6560 | 0.2852 | -437.0354 | -545.5550 | -4.0301 | -4.0042 |
+ | 0.5751 | 0.21 | 800 | 0.5998 | 0.4077 | 0.0879 | 0.6540 | 0.3198 | -437.1260 | -545.2994 | -4.0359 | -4.0099 |
+ | 0.6485 | 0.23 | 900 | 0.5922 | 0.4208 | 0.0655 | 0.6600 | 0.3553 | -437.3501 | -545.1683 | -4.0167 | -3.9936 |
+ | 0.6164 | 0.26 | 1000 | 0.5880 | 0.4046 | 0.0287 | 0.6620 | 0.3759 | -437.7182 | -545.3309 | -4.0092 | -3.9869 |
+ | 0.6225 | 0.28 | 1100 | 0.5852 | 0.4058 | 0.0110 | 0.6680 | 0.3948 | -437.8951 | -545.3189 | -4.0240 | -3.9984 |
+ | 0.6289 | 0.31 | 1200 | 0.5824 | 0.4127 | 0.0078 | 0.6670 | 0.4048 | -437.9265 | -545.2498 | -4.0253 | -3.9994 |
+ | 0.5818 | 0.34 | 1300 | 0.5818 | 0.4222 | 0.0097 | 0.6680 | 0.4125 | -437.9080 | -545.1544 | -4.0212 | -3.9953 |
+ | 0.567 | 0.36 | 1400 | 0.5797 | 0.4098 | -0.0141 | 0.6730 | 0.4238 | -438.1456 | -545.2791 | -4.0333 | -4.0062 |
+ | 0.5659 | 0.39 | 1500 | 0.5790 | 0.4204 | -0.0154 | 0.6780 | 0.4358 | -438.1591 | -545.1725 | -4.0245 | -3.9963 |
+ | 0.5993 | 0.41 | 1600 | 0.5783 | 0.4161 | -0.0285 | 0.6720 | 0.4446 | -438.2904 | -545.2161 | -4.0185 | -3.9907 |
+ | 0.5999 | 0.44 | 1700 | 0.5767 | 0.4067 | -0.0468 | 0.6840 | 0.4535 | -438.4729 | -545.3095 | -4.0207 | -3.9935 |
+ | 0.6004 | 0.46 | 1800 | 0.5731 | 0.4233 | -0.0394 | 0.6830 | 0.4627 | -438.3991 | -545.1437 | -4.0219 | -3.9944 |
+ | 0.5349 | 0.49 | 1900 | 0.5720 | 0.4285 | -0.0429 | 0.6830 | 0.4714 | -438.4335 | -545.0914 | -4.0295 | -4.0012 |
+ | 0.5377 | 0.52 | 2000 | 0.5702 | 0.4255 | -0.0540 | 0.6850 | 0.4795 | -438.5449 | -545.1220 | -4.0290 | -4.0009 |
+ | 0.4988 | 0.54 | 2100 | 0.5713 | 0.4347 | -0.0548 | 0.6840 | 0.4895 | -438.5533 | -545.0299 | -4.0317 | -4.0039 |
+ | 0.6093 | 0.57 | 2200 | 0.5706 | 0.4464 | -0.0456 | 0.6810 | 0.4920 | -438.4607 | -544.9128 | -4.0288 | -4.0014 |
+ | 0.5356 | 0.59 | 2300 | 0.5689 | 0.4484 | -0.0486 | 0.6880 | 0.4971 | -438.4912 | -544.8922 | -4.0257 | -3.9986 |
+ | 0.5753 | 0.62 | 2400 | 0.5681 | 0.4596 | -0.0441 | 0.6850 | 0.5037 | -438.4457 | -544.7802 | -4.0100 | -3.9846 |
+ | 0.5709 | 0.65 | 2500 | 0.5673 | 0.4693 | -0.0387 | 0.6910 | 0.5081 | -438.3924 | -544.6835 | -4.0100 | -3.9849 |
+ | 0.5565 | 0.67 | 2600 | 0.5665 | 0.4692 | -0.0401 | 0.6820 | 0.5092 | -438.4054 | -544.6850 | -4.0096 | -3.9843 |
+ | 0.585 | 0.7 | 2700 | 0.5650 | 0.4780 | -0.0351 | 0.6940 | 0.5131 | -438.3558 | -544.5962 | -4.0074 | -3.9820 |
+ | 0.5883 | 0.72 | 2800 | 0.5670 | 0.4914 | -0.0151 | 0.6880 | 0.5066 | -438.1562 | -544.4624 | -3.9894 | -3.9669 |
+ | 0.624 | 0.75 | 2900 | 0.5663 | 0.4877 | -0.0191 | 0.6840 | 0.5068 | -438.1958 | -544.4997 | -3.9935 | -3.9705 |
+ | 0.5347 | 0.77 | 3000 | 0.5644 | 0.4757 | -0.0335 | 0.6850 | 0.5092 | -438.3401 | -544.6199 | -4.0019 | -3.9777 |
+ | 0.5837 | 0.8 | 3100 | 0.5637 | 0.4783 | -0.0302 | 0.6830 | 0.5085 | -438.3073 | -544.5936 | -3.9976 | -3.9742 |
+ | 0.5293 | 0.83 | 3200 | 0.5634 | 0.4715 | -0.0363 | 0.6890 | 0.5078 | -438.3679 | -544.6616 | -4.0023 | -3.9778 |
+ | 0.5128 | 0.85 | 3300 | 0.5620 | 0.4745 | -0.0387 | 0.6880 | 0.5131 | -438.3917 | -544.6319 | -4.0053 | -3.9804 |
+ | 0.6204 | 0.88 | 3400 | 0.5625 | 0.4679 | -0.0442 | 0.6860 | 0.5121 | -438.4469 | -544.6978 | -4.0067 | -3.9815 |
+ | 0.5469 | 0.9 | 3500 | 0.5618 | 0.4612 | -0.0491 | 0.6860 | 0.5102 | -438.4956 | -544.7651 | -4.0098 | -3.9843 |
+ | 0.5807 | 0.93 | 3600 | 0.5615 | 0.4675 | -0.0454 | 0.6890 | 0.5129 | -438.4584 | -544.7015 | -4.0068 | -3.9818 |
+ | 0.5265 | 0.96 | 3700 | 0.5620 | 0.4675 | -0.0435 | 0.6880 | 0.5110 | -438.4403 | -544.7019 | -4.0082 | -3.9833 |
+ | 0.5484 | 0.98 | 3800 | 0.5615 | 0.4685 | -0.0449 | 0.6930 | 0.5133 | -438.4536 | -544.6919 | -4.0103 | -3.9851 |
+
+
+ ### Framework versions
+
+ - Transformers 4.37.0.dev0
+ - Pytorch 2.1.2+cu121
+ - Datasets 2.15.0
+ - Tokenizers 0.15.0
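
The Rewards/Logps metrics above come from preference optimization (the "dpo" in the model name). For reference, here is a minimal usage sketch; the repo id `amazingvince/zephyr-220m-dpo-full` is assumed from the model-index name, and the chat format comes from the `chat_template` in `tokenizer_config.json` further down.

```python
# Minimal sketch: load the DPO-tuned checkpoint and generate one reply.
# The repo id below is assumed from the model-index name; adjust if needed.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "amazingvince/zephyr-220m-dpo-full"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id)

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Summarize DPO in one sentence."},
]
# apply_chat_template renders the <|system|>/<|user|>/<|assistant|> format
# defined in tokenizer_config.json and appends the generation prompt.
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
)
outputs = model.generate(inputs, max_new_tokens=128, do_sample=True, top_p=0.9)
print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
```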
added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "<|assistant|>": 32001,
+   "<|system|>": 32002,
+   "<|user|>": 32000
+ }
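
These entries are the chat-role tokens appended on top of the base 32,000-entry vocabulary. A quick sanity check, assuming the tokenizer is loaded from this repo:

```python
# The chat-role tokens should resolve to the ids listed in added_tokens.json.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("amazingvince/zephyr-220m-dpo-full")  # assumed repo id
for token, expected in {"<|user|>": 32000, "<|assistant|>": 32001, "<|system|>": 32002}.items():
    assert tok.convert_tokens_to_ids(token) == expected, token
```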
all_results.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "epoch": 1.0,
+   "eval_logits/chosen": -3.9838693141937256,
+   "eval_logits/rejected": -4.009171962738037,
+   "eval_logps/chosen": -544.685791015625,
+   "eval_logps/rejected": -438.45953369140625,
+   "eval_loss": 0.5608103275299072,
+   "eval_rewards/accuracies": 0.6930000185966492,
+   "eval_rewards/chosen": 0.4690808057785034,
+   "eval_rewards/margins": 0.5145381689071655,
+   "eval_rewards/rejected": -0.04545731097459793,
+   "eval_runtime": 146.3889,
+   "eval_samples": 2000,
+   "eval_samples_per_second": 13.662,
+   "eval_steps_per_second": 1.708,
+   "train_loss": 0.5913154047772216,
+   "train_runtime": 14580.3501,
+   "train_samples": 61966,
+   "train_samples_per_second": 4.25,
+   "train_steps_per_second": 0.266
+ }
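
The throughput fields are internally consistent, which is a quick way to validate the log; a worked check:

```python
# The *_per_second fields follow from samples / runtime (all_results.json).
print(round(2000 / 146.3889, 3))          # 13.662 eval samples/s, as reported
print(round(61966 / 14580.3501, 3))       # 4.25 train samples/s, as reported
# Steps/s divides samples/s by the effective batch sizes (16 train, 8 eval):
print(round(61966 / 16 / 14580.3501, 3))  # 0.266 train steps/s, as reported
print(round(2000 / 8 / 146.3889, 3))      # 1.708 eval steps/s, as reported
```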
config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "_name_or_path": "amazingvince/zephyr-220m-sft-full",
+   "architectures": [
+     "MistralForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "max_position_embeddings": 2048,
+   "model_type": "mistral",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 10,
+   "num_key_value_heads": 8,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": null,
+   "rope_theta": 10000.0,
+   "sliding_window": 4096,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.37.0.dev0",
+   "use_cache": false,
+   "vocab_size": 32128
+ }
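
This config implies roughly 218M parameters, consistent with both the "220m" name and the bf16 `model.safetensors` size below (435,736,840 bytes at ~2 bytes/param). A back-of-the-envelope sketch:

```python
# Rough parameter count from config.json
# (Mistral-style GQA attention, SwiGLU MLP, untied embeddings).
hidden, layers, inter, vocab = 1024, 10, 4096, 32128
heads, kv_heads = 32, 8
head_dim = hidden // heads                  # 32
attn = (hidden * hidden                     # q_proj
        + 2 * hidden * kv_heads * head_dim  # k_proj, v_proj (grouped-query)
        + hidden * hidden)                  # o_proj
mlp = 3 * hidden * inter                    # gate, up, down projections
embeddings = 2 * vocab * hidden             # embed_tokens + lm_head (untied)
total = layers * (attn + mlp) + embeddings  # norm weights are negligible
print(f"{total / 1e6:.1f}M parameters")     # ~217.8M -> the "220m" in the name
```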
eval_results.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "epoch": 1.0,
+   "eval_logits/chosen": -3.9838693141937256,
+   "eval_logits/rejected": -4.009171962738037,
+   "eval_logps/chosen": -544.685791015625,
+   "eval_logps/rejected": -438.45953369140625,
+   "eval_loss": 0.5608103275299072,
+   "eval_rewards/accuracies": 0.6930000185966492,
+   "eval_rewards/chosen": 0.4690808057785034,
+   "eval_rewards/margins": 0.5145381689071655,
+   "eval_rewards/rejected": -0.04545731097459793,
+   "eval_runtime": 146.3889,
+   "eval_samples": 2000,
+   "eval_samples_per_second": 13.662,
+   "eval_steps_per_second": 1.708
+ }
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "transformers_version": "4.37.0.dev0",
+   "use_cache": false
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b46fb0ed8fd9958ffe9d56bdc8222681070391eb732da7c1e40682e304095393
+ size 435736840
runs/Jan04_13-21-14_VincentPC/events.out.tfevents.1704392512.VincentPC.2055.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:af68066c4ab02f4be890ad65dbfb937bb5a7e3d3a7869746bbc87344716b723e
+ size 6411
runs/Jan04_13-23-57_VincentPC/events.out.tfevents.1704392649.VincentPC.4262.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:48a63719a15efd94f3818dfeab537d4a37979eee20b85f576fc2d63fa6befc8a
+ size 278852
runs/Jan04_13-23-57_VincentPC/events.out.tfevents.1704407375.VincentPC.4262.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:21a8ee2defead3e1ff4c95b90ca3567c7ca02d3d34c1851621adbb0d53811e83
+ size 828
special_tokens_map.json ADDED
@@ -0,0 +1,35 @@
+ {
+   "additional_special_tokens": [
+     "<|user|>",
+     "<|assistant|>",
+     "<|system|>"
+   ],
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
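
Note that `<unk>` doubles as the pad token (id 0), so padded batches are filled with id 0 and masked out. A minimal sketch, assuming the repo id used above:

```python
# pad_token is "<unk>" (id 0); padding_side is "right" per tokenizer_config.json.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("amazingvince/zephyr-220m-dpo-full")  # assumed repo id
batch = tok(["hi", "a somewhat longer input"], padding=True, return_tensors="pt")
print(batch["input_ids"][0])       # shorter row is right-padded with id 0
print(batch["attention_mask"][0])  # trailing zeros mark the padding
```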
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,78 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "32000": {
+       "content": "<|user|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "32001": {
+       "content": "<|assistant|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "32002": {
+       "content": "<|system|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<|user|>",
+     "<|assistant|>",
+     "<|system|>"
+   ],
+   "bos_token": "<s>",
+   "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": false,
+   "max_length": 2048,
+   "model_max_length": 2048,
+   "pad_token": "<unk>",
+   "padding_side": "right",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "stride": 0,
+   "tokenizer_class": "LlamaTokenizer",
+   "truncation_side": "left",
+   "truncation_strategy": "longest_first",
+   "trust_remote_code": false,
+   "unk_token": "<unk>",
+   "use_default_system_prompt": true,
+   "use_fast": true
+ }
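
The `chat_template` above is what `apply_chat_template` renders at inference time; a quick sketch of the resulting prompt text (exact whitespace depends on Jinja trimming):

```python
# Inspect the prompt string the chat_template produces (no tokenization).
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("amazingvince/zephyr-220m-dpo-full")  # assumed repo id
messages = [
    {"role": "system", "content": "You are helpful."},
    {"role": "user", "content": "Hi!"},
]
print(tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True))
# Expected shape, with </s> as eos_token:
# <|system|>
# You are helpful.</s>
# <|user|>
# Hi!</s>
# <|assistant|>
```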
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 1.0,
+   "train_loss": 0.5913154047772216,
+   "train_runtime": 14580.3501,
+   "train_samples": 61966,
+   "train_samples_per_second": 4.25,
+   "train_steps_per_second": 0.266
+ }
trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:99b56d42550e2842aaa75a1dfb654bded6195284ea68aaa0c9ddbe09a6c5a80a
+ size 5816