kibrq committed
Commit b15d2c2
1 Parent(s): 2d6ab5e

Update model and tokenizer file

config.json CHANGED
@@ -1,8 +1,9 @@
 {
-  "_name_or_path": "/home/kibrq/sampling-from-normal-closure/results/commutator-translation/4-free-group/bert-bert-250/checkpoint-50000",
+  "_name_or_path": "/home/kibrq/draft/commutator-translator/bert-bert-512/checkpoint-419000",
   "architectures": [
     "EncoderDecoderModel"
   ],
+  "bos_token_id": 11,
   "decoder": {
     "_name_or_path": "",
     "add_cross_attention": true,
@@ -53,7 +54,7 @@
     "output_attentions": false,
     "output_hidden_states": false,
     "output_scores": false,
-    "pad_token_id": 12,
+    "pad_token_id": 13,
     "position_embedding_type": "absolute",
     "prefix": null,
     "problem_type": null,
@@ -78,7 +79,7 @@
     "typical_p": 1.0,
     "use_bfloat16": false,
     "use_cache": true,
-    "vocab_size": 13
+    "vocab_size": 14
   },
   "decoder_start_token_id": 11,
   "encoder": {
@@ -131,7 +132,7 @@
     "output_attentions": false,
     "output_hidden_states": false,
     "output_scores": false,
-    "pad_token_id": 12,
+    "pad_token_id": 13,
     "position_embedding_type": "absolute",
     "prefix": null,
     "problem_type": null,
@@ -156,11 +157,12 @@
     "typical_p": 1.0,
     "use_bfloat16": false,
     "use_cache": true,
-    "vocab_size": 13
+    "vocab_size": 14
   },
+  "eos_token_id": 12,
   "is_encoder_decoder": true,
   "model_type": "encoder-decoder",
-  "pad_token_id": 12,
+  "pad_token_id": 13,
   "torch_dtype": "float32",
   "transformers_version": null
 }
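
Taken together, the config changes re-seat the special tokens: <pad> moves from id 12 to id 13, id 12 is freed up for the new </s>, and both BERT sub-vocabularies grow from 13 to 14. A minimal sketch of checking the new ids with transformers (not part of the commit; "./checkpoint" is a hypothetical path to a local clone of this repository):

from transformers import AutoConfig

# "./checkpoint" is a placeholder path, not one from the commit.
cfg = AutoConfig.from_pretrained("./checkpoint")

# Token ids after this commit: <s> = 11, </s> = 12, <pad> = 13.
assert cfg.bos_token_id == 11 and cfg.eos_token_id == 12 and cfg.pad_token_id == 13
assert cfg.decoder_start_token_id == 11
# Encoder and decoder vocabularies both grew from 13 to 14 to fit </s>.
assert cfg.encoder.vocab_size == cfg.decoder.vocab_size == 14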
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d22f6c52b973dd2e02be4e76967ed8ceab03c46731d8f144d1a405e76a60aab0
-size 23610219
+oid sha256:e02c5c9098359dd6f2cb71eb401ca5889935b907919d2fbde3564fa71e894817
+size 23603883
special_tokens_map.json CHANGED
@@ -1 +1,5 @@
-{}
+{
+  "bos_token": "<s>",
+  "eos_token": "</s>",
+  "pad_token": "<pad>"
+}
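
This map is what transformers consults when resolving the named special-token attributes; with the previous empty {}, the loaded tokenizer had no bos/eos/pad registered. A sketch of the effect, again assuming a hypothetical local clone at "./checkpoint":

from transformers import PreTrainedTokenizerFast

tok = PreTrainedTokenizerFast.from_pretrained("./checkpoint")  # hypothetical path
print(tok.special_tokens_map)
# expected after this commit:
# {'bos_token': '<s>', 'eos_token': '</s>', 'pad_token': '<pad>'}
# Before it, the map was {} and tok.pad_token was None, so padded batching
# would presumably have needed the pad token to be set by hand.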
tokenizer.json CHANGED
@@ -3,9 +3,81 @@
   "truncation": null,
   "padding": null,
   "added_tokens": [
+    {
+      "id": 0,
+      "content": "1",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": true,
+      "special": false
+    },
+    {
+      "id": 1,
+      "content": "-1",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": true,
+      "special": false
+    },
+    {
+      "id": 2,
+      "content": "2",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": true,
+      "special": false
+    },
+    {
+      "id": 3,
+      "content": "-2",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": true,
+      "special": false
+    },
+    {
+      "id": 4,
+      "content": "3",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": true,
+      "special": false
+    },
+    {
+      "id": 5,
+      "content": "-3",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": true,
+      "special": false
+    },
+    {
+      "id": 6,
+      "content": "4",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": true,
+      "special": false
+    },
+    {
+      "id": 7,
+      "content": "-4",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": true,
+      "special": false
+    },
     {
       "id": 8,
-      "content": ",",
+      "content": "[",
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
@@ -14,7 +86,7 @@
     },
     {
       "id": 9,
-      "content": "[",
+      "content": "]",
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
@@ -23,7 +95,7 @@
     },
     {
       "id": 10,
-      "content": "]",
+      "content": ",",
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
@@ -41,6 +113,15 @@
     },
     {
       "id": 12,
+      "content": "</s>",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
+    },
+    {
+      "id": 13,
       "content": "<pad>",
       "single_word": false,
       "lstrip": false,
@@ -61,6 +142,12 @@
   "post_processor": {
     "type": "TemplateProcessing",
     "single": [
+      {
+        "SpecialToken": {
+          "id": "<s>",
+          "type_id": 0
+        }
+      },
       {
         "Sequence": {
           "id": "A",
@@ -69,7 +156,7 @@
       },
       {
         "SpecialToken": {
-          "id": "<s>",
+          "id": "</s>",
           "type_id": 0
         }
       }
@@ -89,6 +176,15 @@
       }
     ],
     "special_tokens": {
+      "</s>": {
+        "id": "</s>",
+        "ids": [
+          12
+        ],
+        "tokens": [
+          "</s>"
+        ]
+      },
       "<s>": {
         "id": "<s>",
         "ids": [
@@ -103,16 +199,7 @@
   "decoder": null,
   "model": {
     "type": "WordLevel",
-    "vocab": {
-      "-4": 0,
-      "-3": 1,
-      "-2": 2,
-      "-1": 3,
-      "1": 4,
-      "2": 5,
-      "3": 6,
-      "4": 7
-    },
+    "vocab": {},
     "unk_token": "<unk>"
   }
 }
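
Two behavioral changes are bundled here: the free-group generators (1, -1, ..., 4, -4) move out of the WordLevel vocab into added_tokens (the brackets and comma shift ids accordingly, and the vocab itself becomes empty), and the TemplateProcessing post-processor now wraps every sequence as <s> ... </s> instead of only appending <s>. A sketch of the resulting encoding with the tokenizers library directly (the input word is an arbitrary example, and tokenizer.json is assumed to sit in the working directory):

from tokenizers import Tokenizer

tok = Tokenizer.from_file("tokenizer.json")

# "[ 1 , 2 ]" spells the commutator of generators 1 and 2; every symbol is
# now an added token, so nothing ever reaches the (empty) WordLevel model.
enc = tok.encode("[ 1 , 2 ]")
print(enc.tokens)  # expected: ['<s>', '[', '1', ',', '2', ']', '</s>']
print(enc.ids)     # expected: [11, 8, 0, 10, 2, 9, 12]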
tokenizer_config.json CHANGED
@@ -1,3 +1,8 @@
 {
+  "bos_token": "<s>",
+  "eos_token": "</s>",
+  "name_or_path": "/home/kibrq/draft/tokenizer/word-level-tokenizer-4",
+  "pad_token": "<pad>",
+  "special_tokens_map_file": "/home/kibrq/draft/tokenizer/word-level-tokenizer-4/special_tokens_map.json",
   "tokenizer_class": "PreTrainedTokenizerFast"
 }
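
With the special tokens declared here as well, the fast tokenizer and the model agree on padding and termination end to end. A sketch of the round trip, assuming a hypothetical local clone at "./checkpoint" and the sequence-to-sequence use that the EncoderDecoderModel config implies (the input words are arbitrary examples over the generators):

from transformers import EncoderDecoderModel, PreTrainedTokenizerFast

tokenizer = PreTrainedTokenizerFast.from_pretrained("./checkpoint")  # hypothetical path
model = EncoderDecoderModel.from_pretrained("./checkpoint")

# pad_token_id = 13 is now consistent across tokenizer_config.json and
# config.json, so padded batches line up with the model's padding embedding.
batch = tokenizer(["1 2 -1 -2", "3 4 -3"], return_tensors="pt", padding=True)

out = model.generate(
    batch["input_ids"],
    attention_mask=batch["attention_mask"],
    decoder_start_token_id=11,  # <s>, matching config.json
    eos_token_id=12,            # </s>, introduced by this commit
    pad_token_id=13,
)
print(tokenizer.batch_decode(out, skip_special_tokens=True))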