Text Generation
Transformers
Safetensors
mistral
Generated from Trainer
conversational
Inference Endpoints
text-generation-inference
plaguss committed on
Commit 0941063
1 Parent(s): 5b46304

Model save

README.md CHANGED
@@ -1,32 +1,29 @@
  ---
  license: apache-2.0
- base_model: alignment-handbook/zephyr-7b-sft-full
+ base_model: plaguss/zephyr-7b-spin-iter0-v0
  tags:
- - alignment-handbook
  - generated_from_trainer
- datasets:
- - argilla/10k_prompts_SPIN_iter0_zephyr_top
  model-index:
- - name: outputs
+ - name: zephyr-7b-spin-iter1-v0
    results: []
  ---

  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
  should probably proofread and complete it, then remove this comment. -->

- # outputs
+ # zephyr-7b-spin-iter1-v0

- This model is a fine-tuned version of [alignment-handbook/zephyr-7b-sft-full](https://huggingface.co/alignment-handbook/zephyr-7b-sft-full) on the argilla/10k_prompts_SPIN_iter0_zephyr_top dataset.
+ This model is a fine-tuned version of [plaguss/zephyr-7b-spin-iter0-v0](https://huggingface.co/plaguss/zephyr-7b-spin-iter0-v0) on the None dataset.
  It achieves the following results on the evaluation set:
- - Loss: 0.2359
- - Rewards/real: 1.3255
- - Rewards/generated: -0.8966
+ - Loss: 0.0831
+ - Rewards/real: 1.3037
+ - Rewards/generated: -5.4434
  - Rewards/accuracies: 0.9792
- - Rewards/margins: 2.2221
- - Logps/generated: -309.8145
- - Logps/real: -304.9670
- - Logits/generated: -2.7558
- - Logits/real: -2.7547
+ - Rewards/margins: 6.7471
+ - Logps/generated: -545.0309
+ - Logps/real: -272.3726
+ - Logits/generated: -2.6844
+ - Logits/real: -2.7197

  ## Model description

@@ -63,8 +60,10 @@ The following hyperparameters were used during training:

  | Training Loss | Epoch | Step | Validation Loss | Rewards/real | Rewards/generated | Rewards/accuracies | Rewards/margins | Logps/generated | Logps/real | Logits/generated | Logits/real |
  |:-------------:|:-----:|:----:|:---------------:|:------------:|:-----------------:|:------------------:|:---------------:|:---------------:|:----------:|:----------------:|:-----------:|
- | 0.3011 | 0.96 | 25 | 0.2442 | 1.1606 | -0.9851 | 0.9792 | 2.1457 | -310.6989 | -306.6157 | -2.7644 | -2.7641 |
- | 0.0376 | 1.92 | 50 | 0.2359 | 1.3255 | -0.8966 | 0.9792 | 2.2221 | -309.8145 | -304.9670 | -2.7558 | -2.7547 |
+ | 0.1827 | 0.49 | 25 | 0.1651 | 0.1714 | -3.3650 | 0.9688 | 3.5364 | -524.2469 | -283.6962 | -2.7482 | -2.7944 |
+ | 0.0462 | 0.97 | 50 | 0.0835 | 1.4823 | -4.4998 | 1.0 | 5.9821 | -535.5947 | -270.5871 | -2.6963 | -2.7356 |
+ | 0.0047 | 1.46 | 75 | 0.0837 | 1.3725 | -5.2500 | 0.9896 | 6.6225 | -543.0965 | -271.6846 | -2.6847 | -2.7211 |
+ | 0.0034 | 1.94 | 100 | 0.0831 | 1.3037 | -5.4434 | 0.9792 | 6.7471 | -545.0309 | -272.3726 | -2.6844 | -2.7197 |


  ### Framework versions
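The updated card above describes the second SPIN iteration, trained on top of plaguss/zephyr-7b-spin-iter0-v0. As a quick way to try the published checkpoint, here is a minimal generation sketch; it assumes the repository id from the card, that the tokenizer ships a chat template, and enough GPU memory for a 7B model in float16. It is not the author's evaluation code, just standard transformers usage.

```python
# Minimal sketch (assumptions: repo id from the card above, a chat template in the
# tokenizer, accelerate installed for device_map). Not the author's training/eval code.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "plaguss/zephyr-7b-spin-iter1-v0"  # repo name taken from the card

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.float16, device_map="auto"
)

messages = [{"role": "user", "content": "Explain self-play fine-tuning in two sentences."}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

outputs = model.generate(inputs, max_new_tokens=128, do_sample=False)
print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
```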
all_results.json CHANGED
@@ -1,8 +1,8 @@
  {
- "epoch": 2.0,
- "train_loss": 0.2113667087486157,
- "train_runtime": 1162.1581,
- "train_samples": 1648,
- "train_samples_per_second": 2.836,
+ "epoch": 1.98,
+ "train_loss": 0.09802283835140806,
+ "train_runtime": 2267.6641,
+ "train_samples": 3296,
+ "train_samples_per_second": 2.907,
  "train_steps_per_second": 0.045
  }
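The new summary doubles the sample count (3296 vs. 1648) at roughly the same per-step rate. The reported throughput is consistent with the other fields; a small check, assuming the usual Trainer convention of train_samples × num_train_epochs / train_runtime with num_train_epochs = 2 (taken from trainer_state.json below):

```python
# Recompute train_samples_per_second from the new all_results.json values.
# Assumes the HF Trainer convention: samples * epochs / runtime (epochs = 2 here).
train_samples = 3296
num_train_epochs = 2
train_runtime = 2267.6641

throughput = train_samples * num_train_epochs / train_runtime
print(round(throughput, 3))  # ~2.907, matching the reported value
```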
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b57c4c82684914d3b1f9f2f7631034d52a02ac14d7bfbe136280e383f32cc294
+ oid sha256:692333829b71c45499efd2758619021ab865b5466e18c39d54b0513f17b21c96
  size 4943162336
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:456848aaa337d93707cfa3b47a7be5953b20a81cbf3a351293582933f1398781
+ oid sha256:d2776af7f7fd4970ee2109cd4a99f755cc91babdbb7b300d9c7f6aff27b47589
  size 4999819336
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:da6ba25be6087ce9a2fcadf2792cab1d36b2e3b4d2ebe988876d2b0f3bb733ac
+ oid sha256:1c9d190f377227e3e89b7c96490ab1eac0822345aaf018caa80cb93af167bbc9
  size 4540516344
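Each of the three shard entries above is a Git LFS pointer: only the sha256 oid changes, while the shard sizes stay identical. If a locally downloaded shard needs checking against these pointers, a small hash-verification sketch (hypothetical local file path; expected digest copied from the diff above) could look like this:

```python
# Sketch: verify a downloaded safetensors shard against the sha256 oid in its LFS pointer.
# The shard path is hypothetical; the expected digest is the one shown in the diff above.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "1c9d190f377227e3e89b7c96490ab1eac0822345aaf018caa80cb93af167bbc9"
actual = sha256_of("model-00003-of-00003.safetensors")
print("OK" if actual == expected else f"Mismatch: {actual}")
```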
train_results.json CHANGED
@@ -1,8 +1,8 @@
  {
- "epoch": 2.0,
- "train_loss": 0.2113667087486157,
- "train_runtime": 1162.1581,
- "train_samples": 1648,
- "train_samples_per_second": 2.836,
+ "epoch": 1.98,
+ "train_loss": 0.09802283835140806,
+ "train_runtime": 2267.6641,
+ "train_samples": 3296,
+ "train_samples_per_second": 2.907,
  "train_steps_per_second": 0.045
  }
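train_results.json mirrors the all_results.json change above. The trainer_state.json diff that follows records the full log_history for the 102-step run, with evaluations every 25 steps. To read that evaluation trajectory back out of the updated file, a short parsing sketch (hypothetical local path; key names as they appear in the log entries below):

```python
# Sketch: extract the eval trajectory from trainer_state.json's log_history.
# Path is hypothetical; key names match the entries shown in the diff below.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(
            f"step {entry['step']:>3}  "
            f"eval_loss {entry['eval_loss']:.4f}  "
            f"margin {entry['eval_rewards/margins']:.4f}"
        )
```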
trainer_state.json CHANGED
@@ -1,20 +1,20 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 2.0,
+ "epoch": 1.9805825242718447,
  "eval_steps": 25,
- "global_step": 52,
+ "global_step": 102,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
  {
- "epoch": 0.04,
- "learning_rate": 8.333333333333333e-08,
- "logits/generated": -2.788468599319458,
- "logits/real": -2.8911099433898926,
- "logps/generated": -226.66921997070312,
- "logps/real": -283.6243896484375,
+ "epoch": 0.02,
+ "learning_rate": 4.545454545454545e-08,
+ "logits/generated": -2.706744432449341,
+ "logits/real": -2.8291945457458496,
+ "logps/generated": -517.0836791992188,
+ "logps/real": -317.913818359375,
  "loss": 0.6931,
  "rewards/accuracies": 0.0,
  "rewards/generated": 0.0,
@@ -23,119 +23,221 @@
  "step": 1
  },
  {
- "epoch": 0.38,
- "learning_rate": 4.5652173913043473e-07,
- "logits/generated": -2.8515138626098633,
- "logits/real": -2.8768396377563477,
- "logps/generated": -354.09619140625,
- "logps/real": -350.52911376953125,
- "loss": 0.522,
- "rewards/accuracies": 0.7569444179534912,
- "rewards/generated": -0.21337264776229858,
- "rewards/margins": 0.5563015341758728,
- "rewards/real": 0.3429288864135742,
+ "epoch": 0.19,
+ "learning_rate": 4.545454545454545e-07,
+ "logits/generated": -2.71917462348938,
+ "logits/real": -2.7550785541534424,
+ "logps/generated": -531.1511840820312,
+ "logps/real": -286.0602111816406,
+ "loss": 0.5456,
+ "rewards/accuracies": 0.7291666865348816,
+ "rewards/generated": -0.04339843988418579,
+ "rewards/margins": 0.3565465211868286,
+ "rewards/real": 0.3131480813026428,
  "step": 10
  },
  {
- "epoch": 0.77,
- "learning_rate": 3.478260869565217e-07,
- "logits/generated": -2.792628049850464,
- "logits/real": -2.7778868675231934,
- "logps/generated": -351.04638671875,
- "logps/real": -327.13482666015625,
- "loss": 0.3011,
- "rewards/accuracies": 0.90625,
- "rewards/generated": -0.7889599800109863,
- "rewards/margins": 1.546514868736267,
- "rewards/real": 0.7575550675392151,
+ "epoch": 0.39,
+ "learning_rate": 4.5054945054945056e-07,
+ "logits/generated": -2.830758571624756,
+ "logits/real": -2.861502170562744,
+ "logps/generated": -586.2496337890625,
+ "logps/real": -281.56402587890625,
+ "loss": 0.1827,
+ "rewards/accuracies": 0.981249988079071,
+ "rewards/generated": -1.7483899593353271,
+ "rewards/margins": 2.579699993133545,
+ "rewards/real": 0.8313096761703491,
  "step": 20
  },
  {
- "epoch": 0.96,
- "eval_logits/generated": -2.764375686645508,
- "eval_logits/real": -2.7640507221221924,
- "eval_logps/generated": -310.69891357421875,
- "eval_logps/real": -306.61572265625,
- "eval_loss": 0.24416939914226532,
- "eval_rewards/accuracies": 0.9791666865348816,
- "eval_rewards/generated": -0.9850902557373047,
- "eval_rewards/margins": 2.14570689201355,
- "eval_rewards/real": 1.1606166362762451,
- "eval_runtime": 27.6861,
- "eval_samples_per_second": 6.646,
- "eval_steps_per_second": 0.217,
+ "epoch": 0.49,
+ "eval_logits/generated": -2.7481918334960938,
+ "eval_logits/real": -2.7943999767303467,
+ "eval_logps/generated": -524.2468872070312,
+ "eval_logps/real": -283.6961669921875,
+ "eval_loss": 0.16513165831565857,
+ "eval_rewards/accuracies": 0.96875,
+ "eval_rewards/generated": -3.365018129348755,
+ "eval_rewards/margins": 3.536374092102051,
+ "eval_rewards/real": 0.17135602235794067,
+ "eval_runtime": 54.5373,
+ "eval_samples_per_second": 6.748,
+ "eval_steps_per_second": 0.22,
  "step": 25
  },
  {
- "epoch": 1.15,
- "learning_rate": 2.391304347826087e-07,
- "logits/generated": -2.7530007362365723,
- "logits/real": -2.734692096710205,
- "logps/generated": -310.22607421875,
- "logps/real": -306.02044677734375,
- "loss": 0.1788,
- "rewards/accuracies": 0.9312499761581421,
- "rewards/generated": -1.791497826576233,
- "rewards/margins": 3.7750840187072754,
- "rewards/real": 1.9835857152938843,
+ "epoch": 0.58,
+ "learning_rate": 3.9560439560439557e-07,
+ "logits/generated": -2.7771615982055664,
+ "logits/real": -2.807366371154785,
+ "logps/generated": -499.19207763671875,
+ "logps/real": -301.99432373046875,
+ "loss": 0.1089,
+ "rewards/accuracies": 0.956250011920929,
+ "rewards/generated": -2.920013427734375,
+ "rewards/margins": 3.9550042152404785,
+ "rewards/real": 1.034990668296814,
  "step": 30
  },
  {
- "epoch": 1.54,
- "learning_rate": 1.3043478260869563e-07,
- "logits/generated": -2.7655322551727295,
- "logits/real": -2.776773691177368,
- "logps/generated": -358.19403076171875,
- "logps/real": -309.92767333984375,
- "loss": 0.0384,
- "rewards/accuracies": 0.9937499761581421,
- "rewards/generated": -3.1963627338409424,
- "rewards/margins": 6.872523307800293,
- "rewards/real": 3.6761608123779297,
+ "epoch": 0.78,
+ "learning_rate": 3.4065934065934063e-07,
+ "logits/generated": -2.665315628051758,
+ "logits/real": -2.7098612785339355,
+ "logps/generated": -539.9364013671875,
+ "logps/real": -248.0373077392578,
+ "loss": 0.0698,
+ "rewards/accuracies": 0.96875,
+ "rewards/generated": -3.776944398880005,
+ "rewards/margins": 5.202493667602539,
+ "rewards/real": 1.4255495071411133,
  "step": 40
  },
  {
- "epoch": 1.92,
- "learning_rate": 2.1739130434782606e-08,
- "logits/generated": -2.7564454078674316,
- "logits/real": -2.7757363319396973,
- "logps/generated": -357.3354797363281,
- "logps/real": -296.8515930175781,
- "loss": 0.0376,
- "rewards/accuracies": 0.987500011920929,
- "rewards/generated": -2.9351892471313477,
- "rewards/margins": 6.2575507164001465,
- "rewards/real": 3.322361707687378,
+ "epoch": 0.97,
+ "learning_rate": 2.857142857142857e-07,
+ "logits/generated": -2.6974306106567383,
+ "logits/real": -2.761044979095459,
+ "logps/generated": -644.1237182617188,
+ "logps/real": -292.7959899902344,
+ "loss": 0.0462,
+ "rewards/accuracies": 1.0,
+ "rewards/generated": -4.7168684005737305,
+ "rewards/margins": 6.229194164276123,
+ "rewards/real": 1.5123255252838135,
  "step": 50
  },
  {
- "epoch": 1.92,
- "eval_logits/generated": -2.7557647228240967,
- "eval_logits/real": -2.7546520233154297,
- "eval_logps/generated": -309.8145446777344,
- "eval_logps/real": -304.967041015625,
- "eval_loss": 0.23592980206012726,
- "eval_rewards/accuracies": 0.9791666865348816,
- "eval_rewards/generated": -0.8966498374938965,
- "eval_rewards/margins": 2.2221336364746094,
- "eval_rewards/real": 1.3254839181900024,
- "eval_runtime": 27.8272,
- "eval_samples_per_second": 6.612,
- "eval_steps_per_second": 0.216,
+ "epoch": 0.97,
+ "eval_logits/generated": -2.6962978839874268,
+ "eval_logits/real": -2.7355997562408447,
+ "eval_logps/generated": -535.5946655273438,
+ "eval_logps/real": -270.58709716796875,
+ "eval_loss": 0.08352091163396835,
+ "eval_rewards/accuracies": 1.0,
+ "eval_rewards/generated": -4.499805450439453,
+ "eval_rewards/margins": 5.9820709228515625,
+ "eval_rewards/real": 1.4822653532028198,
+ "eval_runtime": 54.7978,
+ "eval_samples_per_second": 6.716,
+ "eval_steps_per_second": 0.219,
  "step": 50
  },
  {
- "epoch": 2.0,
- "step": 52,
+ "epoch": 1.17,
+ "learning_rate": 2.3076923076923078e-07,
+ "logits/generated": -2.7394375801086426,
+ "logits/real": -2.751296043395996,
+ "logps/generated": -666.791748046875,
+ "logps/real": -282.9411926269531,
+ "loss": 0.0094,
+ "rewards/accuracies": 1.0,
+ "rewards/generated": -6.813584804534912,
+ "rewards/margins": 9.226309776306152,
+ "rewards/real": 2.4127261638641357,
+ "step": 60
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 1.7582417582417584e-07,
+ "logits/generated": -2.7095775604248047,
+ "logits/real": -2.76279616355896,
+ "logps/generated": -612.8692626953125,
+ "logps/real": -276.9359130859375,
+ "loss": 0.0047,
+ "rewards/accuracies": 1.0,
+ "rewards/generated": -7.154807090759277,
+ "rewards/margins": 9.882351875305176,
+ "rewards/real": 2.727543592453003,
+ "step": 70
+ },
+ {
+ "epoch": 1.46,
+ "eval_logits/generated": -2.6846535205841064,
+ "eval_logits/real": -2.7210752964019775,
+ "eval_logps/generated": -543.0964965820312,
+ "eval_logps/real": -271.6846008300781,
+ "eval_loss": 0.08372422307729721,
+ "eval_rewards/accuracies": 0.9895833134651184,
+ "eval_rewards/generated": -5.249976634979248,
+ "eval_rewards/margins": 6.622490406036377,
+ "eval_rewards/real": 1.3725138902664185,
+ "eval_runtime": 56.1231,
+ "eval_samples_per_second": 6.557,
+ "eval_steps_per_second": 0.214,
+ "step": 75
+ },
+ {
+ "epoch": 1.55,
+ "learning_rate": 1.2087912087912088e-07,
+ "logits/generated": -2.6911048889160156,
+ "logits/real": -2.730372667312622,
+ "logps/generated": -622.9490966796875,
+ "logps/real": -264.1090087890625,
+ "loss": 0.0063,
+ "rewards/accuracies": 0.9937499761581421,
+ "rewards/generated": -7.1237335205078125,
+ "rewards/margins": 9.698349952697754,
+ "rewards/real": 2.5746169090270996,
+ "step": 80
+ },
+ {
+ "epoch": 1.75,
+ "learning_rate": 6.593406593406594e-08,
+ "logits/generated": -2.6824026107788086,
+ "logits/real": -2.712606906890869,
+ "logps/generated": -557.6444702148438,
+ "logps/real": -256.166259765625,
+ "loss": 0.0079,
+ "rewards/accuracies": 1.0,
+ "rewards/generated": -7.720358371734619,
+ "rewards/margins": 10.142163276672363,
+ "rewards/real": 2.4218039512634277,
+ "step": 90
+ },
+ {
+ "epoch": 1.94,
+ "learning_rate": 1.098901098901099e-08,
+ "logits/generated": -2.727700710296631,
+ "logits/real": -2.769242525100708,
+ "logps/generated": -596.8726806640625,
+ "logps/real": -262.6658020019531,
+ "loss": 0.0034,
+ "rewards/accuracies": 1.0,
+ "rewards/generated": -7.486897945404053,
+ "rewards/margins": 9.872056007385254,
+ "rewards/real": 2.385158061981201,
+ "step": 100
+ },
+ {
+ "epoch": 1.94,
+ "eval_logits/generated": -2.6844117641448975,
+ "eval_logits/real": -2.7196710109710693,
+ "eval_logps/generated": -545.0308837890625,
+ "eval_logps/real": -272.3725891113281,
+ "eval_loss": 0.08312664180994034,
+ "eval_rewards/accuracies": 0.9791666865348816,
+ "eval_rewards/generated": -5.443421840667725,
+ "eval_rewards/margins": 6.747138500213623,
+ "eval_rewards/real": 1.3037166595458984,
+ "eval_runtime": 56.0968,
+ "eval_samples_per_second": 6.56,
+ "eval_steps_per_second": 0.214,
+ "step": 100
+ },
+ {
+ "epoch": 1.98,
+ "step": 102,
  "total_flos": 0.0,
- "train_loss": 0.2113667087486157,
- "train_runtime": 1162.1581,
- "train_samples_per_second": 2.836,
+ "train_loss": 0.09802283835140806,
+ "train_runtime": 2267.6641,
+ "train_samples_per_second": 2.907,
  "train_steps_per_second": 0.045
  }
  ],
  "logging_steps": 10,
- "max_steps": 52,
+ "max_steps": 102,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,