pszmk committed
Commit 9873077
1 Parent(s): b2eea4d

End of training

README.md CHANGED
@@ -13,7 +13,7 @@ should probably proofread and complete it, then remove this comment. -->
  
  This model is a fine-tuned version of [](https://huggingface.co/) on the None dataset.
  It achieves the following results on the evaluation set:
- - Loss: 0.7556
+ - Loss: 2.9524
  
  ## Model description
  
@@ -32,7 +32,7 @@ More information needed
  ### Training hyperparameters
  
  The following hyperparameters were used during training:
- - learning_rate: 0.001
+ - learning_rate: 0.01
  - train_batch_size: 512
  - eval_batch_size: 512
  - seed: 42
@@ -44,46 +44,46 @@ The following hyperparameters were used during training:
  
  | Training Loss | Epoch | Step | Validation Loss |
  |:-------------:|:-----:|:----:|:---------------:|
- | 3.1456 | 1.0 | 6 | 2.5517 |
- | 2.1398 | 2.0 | 12 | 1.8461 |
- | 1.7232 | 3.0 | 18 | 1.6222 |
- | 1.6435 | 4.0 | 24 | 1.6869 |
- | 1.6417 | 5.0 | 30 | 1.5262 |
- | 1.4634 | 6.0 | 36 | 1.4824 |
- | 1.4507 | 7.0 | 42 | 1.8991 |
- | 1.54 | 8.0 | 48 | 1.4370 |
- | 1.483 | 9.0 | 54 | 1.4203 |
- | 1.4086 | 10.0 | 60 | 1.4349 |
- | 1.4063 | 11.0 | 66 | 1.5677 |
- | 1.4235 | 12.0 | 72 | 1.3646 |
- | 1.2839 | 13.0 | 78 | 1.2074 |
- | 1.2127 | 14.0 | 84 | 1.2050 |
- | 1.1447 | 15.0 | 90 | 1.1797 |
- | 1.1515 | 16.0 | 96 | 1.1440 |
- | 1.1138 | 17.0 | 102 | 1.1293 |
- | 1.07 | 18.0 | 108 | 1.1061 |
- | 1.0295 | 19.0 | 114 | 1.1572 |
- | 1.1086 | 20.0 | 120 | 1.3547 |
- | 1.1835 | 21.0 | 126 | 1.1306 |
- | 1.0968 | 22.0 | 132 | 1.0152 |
- | 1.0012 | 23.0 | 138 | 1.0467 |
- | 1.0088 | 24.0 | 144 | 0.9831 |
- | 0.9616 | 25.0 | 150 | 0.9141 |
- | 0.9385 | 26.0 | 156 | 0.9093 |
- | 0.8887 | 27.0 | 162 | 0.9188 |
- | 0.9549 | 28.0 | 168 | 0.9369 |
- | 0.8972 | 29.0 | 174 | 0.8518 |
- | 0.8973 | 30.0 | 180 | 0.8424 |
- | 0.8419 | 31.0 | 186 | 0.8858 |
- | 0.8887 | 32.0 | 192 | 0.8781 |
- | 0.8509 | 33.0 | 198 | 0.8372 |
- | 0.8473 | 34.0 | 204 | 0.8021 |
- | 0.8361 | 35.0 | 210 | 0.7912 |
- | 0.8037 | 36.0 | 216 | 0.7771 |
- | 0.7776 | 37.0 | 222 | 0.7742 |
- | 0.7767 | 38.0 | 228 | 0.7602 |
- | 0.795 | 39.0 | 234 | 0.7568 |
- | 0.8284 | 40.0 | 240 | 0.7556 |
+ | 3.6485 | 1.0 | 6 | 3.2312 |
+ | 3.1316 | 2.0 | 12 | 2.9996 |
+ | 3.037 | 3.0 | 18 | 3.0512 |
+ | 3.0193 | 4.0 | 24 | 3.0006 |
+ | 2.9967 | 5.0 | 30 | 2.9620 |
+ | 2.9847 | 6.0 | 36 | 2.9710 |
+ | 2.9765 | 7.0 | 42 | 2.9646 |
+ | 2.9729 | 8.0 | 48 | 2.9598 |
+ | 2.9755 | 9.0 | 54 | 2.9601 |
+ | 2.9644 | 10.0 | 60 | 2.9621 |
+ | 2.967 | 11.0 | 66 | 2.9609 |
+ | 2.9713 | 12.0 | 72 | 2.9563 |
+ | 2.9704 | 13.0 | 78 | 2.9534 |
+ | 2.9708 | 14.0 | 84 | 2.9552 |
+ | 2.9582 | 15.0 | 90 | 2.9530 |
+ | 2.9729 | 16.0 | 96 | 2.9547 |
+ | 2.9628 | 17.0 | 102 | 2.9576 |
+ | 2.9621 | 18.0 | 108 | 2.9559 |
+ | 2.968 | 19.0 | 114 | 2.9524 |
+ | 2.9663 | 20.0 | 120 | 2.9556 |
+ | 2.9609 | 21.0 | 126 | 2.9534 |
+ | 2.9661 | 22.0 | 132 | 2.9523 |
+ | 2.9563 | 23.0 | 138 | 2.9593 |
+ | 2.9656 | 24.0 | 144 | 2.9551 |
+ | 2.9604 | 25.0 | 150 | 2.9534 |
+ | 2.9694 | 26.0 | 156 | 2.9525 |
+ | 2.9636 | 27.0 | 162 | 2.9522 |
+ | 2.9561 | 28.0 | 168 | 2.9535 |
+ | 2.9762 | 29.0 | 174 | 2.9548 |
+ | 2.962 | 30.0 | 180 | 2.9530 |
+ | 2.9537 | 31.0 | 186 | 2.9501 |
+ | 2.9592 | 32.0 | 192 | 2.9513 |
+ | 2.9695 | 33.0 | 198 | 2.9519 |
+ | 2.9664 | 34.0 | 204 | 2.9527 |
+ | 2.9594 | 35.0 | 210 | 2.9532 |
+ | 2.9582 | 36.0 | 216 | 2.9529 |
+ | 2.9718 | 37.0 | 222 | 2.9533 |
+ | 2.9563 | 38.0 | 228 | 2.9530 |
+ | 2.9599 | 39.0 | 234 | 2.9525 |
+ | 2.9615 | 40.0 | 240 | 2.9524 |
  
  
  ### Framework versions
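
The updated card records the learning rate, batch sizes, and seed, and its results table covers 40 epochs. As a rough orientation only, here is a minimal sketch of how those values would map onto a transformers `TrainingArguments` object; `output_dir`, `num_train_epochs`, and the evaluation strategy are assumptions inferred from the table, not values stated in the commit.

```python
from transformers import TrainingArguments

# Sketch only: maps the card's hyperparameters onto TrainingArguments.
# output_dir is a placeholder; num_train_epochs=40 and per-epoch evaluation
# are inferred from the results table, not stated explicitly in the card.
training_args = TrainingArguments(
    output_dir="out",                  # placeholder
    learning_rate=0.01,                # changed from 0.001 in this commit
    per_device_train_batch_size=512,   # card's train_batch_size (single device assumed)
    per_device_eval_batch_size=512,    # card's eval_batch_size
    seed=42,
    num_train_epochs=40,               # 40 epochs appear in the results table
    evaluation_strategy="epoch",       # assumed: one validation loss is logged per epoch
)
```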
config.json CHANGED
@@ -78,7 +78,7 @@
  "typical_p": 1.0,
  "use_bfloat16": false,
  "use_cache": true,
- "vocab_size": 52
+ "vocab_size": 53
  },
  "decoder_start_token_id": 2,
  "encoder": {
@@ -157,7 +157,7 @@
  "typical_p": 1.0,
  "use_bfloat16": false,
  "use_cache": true,
- "vocab_size": 52
+ "vocab_size": 53
  },
  "eos_token_id": 0,
  "is_encoder_decoder": true,
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:aac6b2a604579ceb3da782ae54c341d32a629e1cd50b9f20eea47ba3eae91145
- size 31205552
+ oid sha256:4624a45de91282f047b17d2f4fcf3b7d651cffacd9f833701f580bed957b5aab
+ size 31207604
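
The config.json change (vocab_size 52 → 53 in both the encoder and decoder sub-configs) and the slightly larger model.safetensors above are consistent with one token being added to the tokenizer and the embeddings being resized to match. A hedged sketch of that step follows; the nested encoder/decoder blocks suggest an EncoderDecoderModel, in which case the sub-models are resized individually (the wrapper itself does not implement resize_token_embeddings). The local paths are placeholders.

```python
from transformers import EncoderDecoderModel, PreTrainedTokenizerFast

# Hypothetical reproduction of the vocab bump (52 -> 53) after adding "=".
# Paths are placeholders for a local checkout of this repo.
tokenizer = PreTrainedTokenizerFast(tokenizer_file="tokenizer.json")
model = EncoderDecoderModel.from_pretrained(".")

# The EncoderDecoderModel wrapper does not resize embeddings itself, so each
# sub-model is resized; this also updates the nested vocab_size entries that
# end up in config.json.
model.encoder.resize_token_embeddings(len(tokenizer))
model.decoder.resize_token_embeddings(len(tokenizer))
model.save_pretrained(".")  # writes the slightly larger model.safetensors
```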
runs/Mar04_10-15-03_c80d5fea20ad/events.out.tfevents.1709547304.c80d5fea20ad.139.2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0b38305991b5bd12eded201b427f7af93f9906f6d106b2dd283b10d0342c3e06
+ size 15303
runs/Mar04_10-22-11_c80d5fea20ad/events.out.tfevents.1709547732.c80d5fea20ad.139.3 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0150a45616450d7d666e8bb427712c0d4eea0bae3fc388650bfed1a2f86dfe5e
+ size 27996
tokenizer.json CHANGED
@@ -112,42 +112,43 @@
  "7": 13,
  "8": 14,
  "9": 15,
- "10": 16,
- "99": 17,
- "98": 18,
- "11": 19,
- "97": 20,
- "12": 21,
- "13": 22,
+ "=": 16,
+ "10": 17,
+ "99": 18,
+ "98": 19,
+ "11": 20,
+ "97": 21,
+ "12": 22,
  "96": 23,
- "95": 24,
- "14": 25,
- "94": 26,
- "15": 27,
- "93": 28,
- "16": 29,
- "92": 30,
+ "13": 24,
+ "95": 25,
+ "14": 26,
+ "94": 27,
+ "15": 28,
+ "93": 29,
+ "16": 30,
  "17": 31,
- "91": 32,
+ "92": 32,
  "18": 33,
- "90": 34,
- "19": 35,
- "20": 36,
- "89": 37,
- "88": 38,
+ "91": 34,
+ "90": 35,
+ "19": 36,
+ "20": 37,
+ "89": 38,
  "21": 39,
- "22": 40,
+ "88": 40,
  "87": 41,
- "86": 42,
+ "22": 42,
  "23": 43,
- "24": 44,
+ "86": 44,
  "85": 45,
- "25": 46,
- "84": 47,
- "26": 48,
+ "24": 46,
+ "25": 47,
+ "84": 48,
  "83": 49,
- "27": 50,
- "82": 51
+ "26": 50,
+ "27": 51,
+ "82": 52
  },
  "merges": [
  "1 0",
@@ -156,34 +157,34 @@
  "1 1",
  "9 7",
  "1 2",
- "1 3",
  "9 6",
+ "1 3",
  "9 5",
  "1 4",
  "9 4",
  "1 5",
  "9 3",
  "1 6",
- "9 2",
  "1 7",
- "9 1",
+ "9 2",
  "1 8",
+ "9 1",
  "9 0",
  "1 9",
  "2 0",
  "8 9",
- "8 8",
  "2 1",
- "2 2",
+ "8 8",
  "8 7",
- "8 6",
+ "2 2",
  "2 3",
- "2 4",
+ "8 6",
  "8 5",
+ "2 4",
  "2 5",
  "8 4",
- "2 6",
  "8 3",
+ "2 6",
  "2 7",
  "8 2"
  ]
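
The tokenizer diff inserts "=" at id 16, shifts the ids of the later vocabulary entries, and reorders the corresponding merge rules. A quick sanity check of the updated file, shown as a sketch with the tokenizers library:

```python
from tokenizers import Tokenizer

# Sketch: verify the updated vocabulary directly from tokenizer.json.
tok = Tokenizer.from_file("tokenizer.json")
print(tok.token_to_id("="))    # expected: 16 after this commit
print(tok.get_vocab_size())    # expected: 53, matching config.json
```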
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:018a1b683f3740f2832794a3b95bb71f04f3c029f972d9abc1dca51f7ba897d0
+ oid sha256:c442b4abff85481bb4365240162bad56919332b7b6f174a7892012f02caf99ce
  size 5112