mbial committed on
Commit
6decd5b
1 Parent(s): a11e4d7

End of training

README.md CHANGED
@@ -13,7 +13,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [](https://huggingface.co/) on the None dataset.
 It achieves the following results on the evaluation set:
- - Loss: 0.1167
+ - Loss: 0.2167
 
 ## Model description
 
@@ -44,51 +44,51 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch | Step | Validation Loss |
 |:-------------:|:-----:|:----:|:---------------:|
- | 2.9273 | 1.0 | 6 | 2.2511 |
- | 1.9997 | 2.0 | 12 | 1.7899 |
- | 1.5879 | 3.0 | 18 | 1.4061 |
- | 1.2647 | 4.0 | 24 | 1.1517 |
- | 1.0856 | 5.0 | 30 | 0.9960 |
- | 0.9355 | 6.0 | 36 | 0.8942 |
- | 0.8536 | 7.0 | 42 | 0.8276 |
- | 0.7951 | 8.0 | 48 | 0.7920 |
- | 0.7389 | 9.0 | 54 | 0.6889 |
- | 0.6781 | 10.0 | 60 | 0.6773 |
- | 0.652 | 11.0 | 66 | 0.6381 |
- | 0.6175 | 12.0 | 72 | 0.6497 |
- | 0.6544 | 13.0 | 78 | 0.6204 |
- | 0.6023 | 14.0 | 84 | 0.6196 |
- | 0.5674 | 15.0 | 90 | 0.5411 |
- | 0.5383 | 16.0 | 96 | 0.5058 |
- | 0.5099 | 17.0 | 102 | 0.4813 |
- | 0.479 | 18.0 | 108 | 0.4456 |
- | 0.4579 | 19.0 | 114 | 0.4139 |
- | 0.4268 | 20.0 | 120 | 0.4113 |
- | 0.4266 | 21.0 | 126 | 0.3853 |
- | 0.3894 | 22.0 | 132 | 0.3489 |
- | 0.371 | 23.0 | 138 | 0.3318 |
- | 0.3349 | 24.0 | 144 | 0.2997 |
- | 0.3145 | 25.0 | 150 | 0.2812 |
- | 0.2893 | 26.0 | 156 | 0.2696 |
- | 0.2861 | 27.0 | 162 | 0.2355 |
- | 0.2639 | 28.0 | 168 | 0.2197 |
- | 0.2356 | 29.0 | 174 | 0.2040 |
- | 0.2205 | 30.0 | 180 | 0.1951 |
- | 0.2146 | 31.0 | 186 | 0.1785 |
- | 0.199 | 32.0 | 192 | 0.1649 |
- | 0.1886 | 33.0 | 198 | 0.1496 |
- | 0.1728 | 34.0 | 204 | 0.1400 |
- | 0.1933 | 35.0 | 210 | 0.1311 |
- | 0.1546 | 36.0 | 216 | 0.1296 |
- | 0.1633 | 37.0 | 222 | 0.1237 |
- | 0.1524 | 38.0 | 228 | 0.1202 |
- | 0.156 | 39.0 | 234 | 0.1177 |
- | 0.1439 | 40.0 | 240 | 0.1167 |
+ | 2.9708 | 1.0 | 6 | 2.2532 |
+ | 2.0334 | 2.0 | 12 | 1.7317 |
+ | 1.5636 | 3.0 | 18 | 1.3306 |
+ | 1.2228 | 4.0 | 24 | 1.1101 |
+ | 1.0587 | 5.0 | 30 | 0.9678 |
+ | 0.9399 | 6.0 | 36 | 0.8654 |
+ | 0.8269 | 7.0 | 42 | 0.7420 |
+ | 0.7354 | 8.0 | 48 | 0.6981 |
+ | 0.6841 | 9.0 | 54 | 0.6609 |
+ | 0.6499 | 10.0 | 60 | 0.6128 |
+ | 0.592 | 11.0 | 66 | 0.5730 |
+ | 0.5651 | 12.0 | 72 | 0.5324 |
+ | 0.53 | 13.0 | 78 | 0.5232 |
+ | 0.5087 | 14.0 | 84 | 0.5066 |
+ | 0.5052 | 15.0 | 90 | 0.4966 |
+ | 0.4933 | 16.0 | 96 | 0.4476 |
+ | 0.4622 | 17.0 | 102 | 0.4559 |
+ | 0.4604 | 18.0 | 108 | 0.4520 |
+ | 0.4386 | 19.0 | 114 | 0.4525 |
+ | 0.4453 | 20.0 | 120 | 0.4345 |
+ | 0.4293 | 21.0 | 126 | 0.3955 |
+ | 0.3904 | 22.0 | 132 | 0.3695 |
+ | 0.3793 | 23.0 | 138 | 0.3614 |
+ | 0.3648 | 24.0 | 144 | 0.3463 |
+ | 0.3471 | 25.0 | 150 | 0.3247 |
+ | 0.3391 | 26.0 | 156 | 0.3100 |
+ | 0.3084 | 27.0 | 162 | 0.2981 |
+ | 0.3188 | 28.0 | 168 | 0.2901 |
+ | 0.3074 | 29.0 | 174 | 0.2791 |
+ | 0.2847 | 30.0 | 180 | 0.2832 |
+ | 0.2832 | 31.0 | 186 | 0.2637 |
+ | 0.2762 | 32.0 | 192 | 0.2596 |
+ | 0.2928 | 33.0 | 198 | 0.2487 |
+ | 0.2759 | 34.0 | 204 | 0.2408 |
+ | 0.2634 | 35.0 | 210 | 0.2334 |
+ | 0.2585 | 36.0 | 216 | 0.2270 |
+ | 0.2444 | 37.0 | 222 | 0.2227 |
+ | 0.2394 | 38.0 | 228 | 0.2183 |
+ | 0.2376 | 39.0 | 234 | 0.2174 |
+ | 0.2394 | 40.0 | 240 | 0.2167 |
 
 
 ### Framework versions
 
- - Transformers 4.39.1
+ - Transformers 4.38.2
 - Pytorch 2.2.1+cu121
 - Datasets 2.18.0
 - Tokenizers 0.15.2
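The model card above leaves the base model and repository id blank, so the following is only a rough sketch of how a checkpoint like this (an encoder-decoder model per config.json below, with decoder_start_token_id 2, eos_token_id 0 and pad_token_id 3) might be loaded and run with transformers; the repo id and the example input string are hypothetical placeholders, not values taken from this commit.

```python
# Minimal sketch, assuming a published repo id; substitute the real one.
from transformers import AutoTokenizer, EncoderDecoderModel

repo_id = "your-namespace/your-model"  # hypothetical placeholder

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = EncoderDecoderModel.from_pretrained(repo_id)

# Example input only; the actual task is not documented in the card.
inputs = tokenizer("10 20 =", return_tensors="pt")

outputs = model.generate(
    **inputs,
    decoder_start_token_id=model.config.decoder_start_token_id,  # 2 per config.json
    eos_token_id=model.config.eos_token_id,                      # 0 per config.json
    pad_token_id=model.config.pad_token_id,                      # 3 per config.json
    max_new_tokens=8,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```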
config.json CHANGED
@@ -78,7 +78,7 @@
 "typical_p": 1.0,
 "use_bfloat16": false,
 "use_cache": true,
- "vocab_size": 52
+ "vocab_size": 53
 },
 "decoder_start_token_id": 2,
 "encoder": {
@@ -157,13 +157,13 @@
 "typical_p": 1.0,
 "use_bfloat16": false,
 "use_cache": true,
- "vocab_size": 52
+ "vocab_size": 53
 },
 "eos_token_id": 0,
 "is_encoder_decoder": true,
 "model_type": "encoder-decoder",
 "pad_token_id": 3,
 "torch_dtype": "float32",
- "transformers_version": "4.39.1",
+ "transformers_version": "4.38.2",
 "unk_token_id": null
 }
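The vocab_size change from 52 to 53 in both the encoder and decoder sub-configs lines up with the tokenizer.json diff further down (a new token "82" is added with id 52) and with the slightly larger model.safetensors file. A minimal consistency check, reusing the hypothetical repo id from the sketch above:

```python
# Sanity-check sketch: tokenizer and both sub-configs should agree on vocabulary size.
from transformers import AutoConfig, AutoTokenizer

repo_id = "your-namespace/your-model"  # hypothetical placeholder, as above

tokenizer = AutoTokenizer.from_pretrained(repo_id)
config = AutoConfig.from_pretrained(repo_id)  # EncoderDecoderConfig for model_type "encoder-decoder"

# After this commit all three should report 53 tokens.
assert len(tokenizer) == config.encoder.vocab_size == config.decoder.vocab_size == 53
```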
generation_config.json CHANGED
@@ -3,5 +3,5 @@
 "decoder_start_token_id": 2,
 "eos_token_id": 0,
 "pad_token_id": 3,
- "transformers_version": "4.39.1"
+ "transformers_version": "4.38.2"
 }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:1153cedfc85ec5d686a11284c8328805e31efaec488f840a6503823e6955c3db
- size 31205552
+ oid sha256:fab7ebca07c1654775fe9566420c379fa893d378403fdf2efb22053f5e40528e
+ size 31207604
runs/Mar24_14-32-40_6999fcd7abc0/events.out.tfevents.1711290761.6999fcd7abc0.344.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:655d3e8ec7efbd55091af0cb0ac9f4cbe9559c2d2cefaa1373817fd42949c79f
+ size 27997
tokenizer.json CHANGED
@@ -115,76 +115,78 @@
 "=": 16,
 "10": 17,
 "99": 18,
- "98": 19,
- "11": 20,
+ "11": 19,
+ "98": 20,
 "12": 21,
 "97": 22,
 "96": 23,
 "13": 24,
 "95": 25,
 "14": 26,
- "94": 27,
- "15": 28,
+ "15": 27,
+ "94": 28,
 "16": 29,
 "93": 30,
- "92": 31,
- "17": 32,
+ "17": 31,
+ "92": 32,
 "91": 33,
 "18": 34,
- "19": 35,
- "90": 36,
- "89": 37,
- "20": 38,
- "88": 39,
- "21": 40,
+ "90": 35,
+ "19": 36,
+ "20": 37,
+ "89": 38,
+ "21": 39,
+ "88": 40,
 "22": 41,
 "87": 42,
- "23": 43,
- "86": 44,
+ "86": 43,
+ "23": 44,
 "85": 45,
 "24": 46,
 "25": 47,
 "84": 48,
 "26": 49,
 "83": 50,
- "27": 51
+ "27": 51,
+ "82": 52
 },
 "merges": [
 "1 0",
 "9 9",
- "9 8",
 "1 1",
+ "9 8",
 "1 2",
 "9 7",
 "9 6",
 "1 3",
 "9 5",
 "1 4",
- "9 4",
 "1 5",
+ "9 4",
 "1 6",
 "9 3",
- "9 2",
 "1 7",
+ "9 2",
 "9 1",
 "1 8",
- "1 9",
 "9 0",
- "8 9",
+ "1 9",
 "2 0",
- "8 8",
+ "8 9",
 "2 1",
+ "8 8",
 "2 2",
 "8 7",
- "2 3",
 "8 6",
+ "2 3",
 "8 5",
 "2 4",
 "2 5",
 "8 4",
 "2 6",
 "8 3",
- "2 7"
+ "2 7",
+ "8 2"
 ]
 }
 }
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:c4b9f3f5874638aabbe2e04368849eece01253418968201c2f118982fc0761f0
+ oid sha256:160d6026e0184d20fa6b068e287f3fa90f6d2471cb2d32c45c963f3a6f69912f
 size 5112