zyliu committed
Commit faae5d8
1 Parent(s): 24ae181

first init

added_tokens.json ADDED
@@ -0,0 +1,44 @@
+{
+  "</box>": 92552,
+  "</det>": 92560,
+  "</grd>": 92562,
+  "</img>": 92545,
+  "</quad>": 92548,
+  "</ref>": 92550,
+  "</reg>": 92558,
+  "</s>": 2,
+  "<IMG_CONTEXT>": 92546,
+  "<box>": 92551,
+  "<det>": 92559,
+  "<grd>": 92561,
+  "<im_patch>": 92555,
+  "<image>": 92554,
+  "<img>": 92544,
+  "<quad>": 92547,
+  "<ref>": 92549,
+  "<reg>": 92557,
+  "<region>": 92556,
+  "<s>": 1,
+  "<unk>": 0,
+  "<|action_end|>": 92540,
+  "<|action_start|>": 92541,
+  "<|im_end|>": 92542,
+  "<|im_start|>": 92543,
+  "<|interpreter|>": 92539,
+  "<|plugin|>": 92538,
+  "[DET]": 92563,
+  "[EDIT]": 92568,
+  "[EMB2]": 92570,
+  "[EMB3]": 92571,
+  "[EMB4]": 92572,
+  "[EMB5]": 92573,
+  "[EMB6]": 92574,
+  "[EMB7]": 92575,
+  "[EMB8]": 92576,
+  "[EMB]": 92569,
+  "[GEN]": 92567,
+  "[GRD]": 92564,
+  "[PAD]": 92553,
+  "[POSE]": 92566,
+  "[SEG]": 92565
+}
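
The map above extends the base InternLM2 vocabulary (ids 0–92537) with the multimodal control tokens used by this checkpoint. A minimal sanity-check sketch, assuming the repo is checked out in the current directory (the "." path is an assumption; point it at the actual download location). trust_remote_code is needed because tokenizer_config.json maps AutoTokenizer to the custom tokenization_internlm2.InternLM2Tokenizer below.

from transformers import AutoTokenizer

# Load the tokenizer shipped in this commit and confirm a few of the
# added special tokens resolve to the ids listed in added_tokens.json.
tokenizer = AutoTokenizer.from_pretrained('.', trust_remote_code=True)
for token, expected_id in [('[GEN]', 92567), ('[EDIT]', 92568), ('[SEG]', 92565)]:
    assert tokenizer.convert_tokens_to_ids(token) == expected_id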
config.json ADDED
@@ -0,0 +1,380 @@
+{
+  "_commit_hash": null,
+  "_name_or_path": "work_dirs/internvl-gen-edit-1epoch/bak/checkpoint-14000",
+  "architectures": [
+    "VisionLLMv2Model"
+  ],
+  "gdino_config": null,
+  "ip2p_config": {
+    "_name_or_path": "visionllmv2/model/instruct_pix2pix/ip2p.json",
+    "add_cross_attention": false,
+    "architectures": [
+      "InstructPix2PixWithLLMEmbConfig"
+    ],
+    "bad_words_ids": null,
+    "begin_suppress_tokens": null,
+    "bos_token_id": null,
+    "cfg_drop_rate": 0.05,
+    "cfg_scale": 7.5,
+    "chunk_size_feed_forward": 0,
+    "cross_attention_hidden_size": null,
+    "decoder_start_token_id": null,
+    "diversity_penalty": 0.0,
+    "do_sample": false,
+    "early_stopping": false,
+    "embed_tokens": {
+      "emb": "[EMB]",
+      "emb2": "[EMB2]",
+      "emb3": "[EMB3]",
+      "emb4": "[EMB4]",
+      "emb5": "[EMB5]",
+      "emb6": "[EMB6]",
+      "emb7": "[EMB7]",
+      "emb8": "[EMB8]"
+    },
+    "encoder_no_repeat_ngram_size": 0,
+    "eos_token_id": null,
+    "exponential_decay_length_penalty": null,
+    "finetuning_task": null,
+    "forced_bos_token_id": null,
+    "forced_eos_token_id": null,
+    "id2label": {
+      "0": "LABEL_0",
+      "1": "LABEL_1"
+    },
+    "is_decoder": false,
+    "is_encoder_decoder": false,
+    "label2id": {
+      "LABEL_0": 0,
+      "LABEL_1": 1
+    },
+    "length_penalty": 1.0,
+    "llm_hidden_size": 6144,
+    "max_length": 20,
+    "min_length": 0,
+    "model_type": "instructpix2pix_with_llm_emb",
+    "no_repeat_ngram_size": 0,
+    "num_beam_groups": 1,
+    "num_beams": 1,
+    "num_decoder_layers": 1,
+    "num_embed_tokens": 64,
+    "num_encoder_layers": 1,
+    "num_queries": 77,
+    "num_return_sequences": 1,
+    "output_attentions": false,
+    "output_hidden_states": false,
+    "output_scores": false,
+    "pad_token_id": null,
+    "prefix": null,
+    "problem_type": null,
+    "pruned_heads": {},
+    "remove_invalid_values": false,
+    "repetition_penalty": 1.0,
+    "return_dict": true,
+    "return_dict_in_generate": false,
+    "sd_hidden_size": 768,
+    "sd_model_id": "checkpoints/instruct-pix2pix",
+    "sep_token_id": null,
+    "suppress_tokens": null,
+    "task_specific_params": null,
+    "temperature": 1.0,
+    "tf_legacy_loss": false,
+    "tie_encoder_decoder": false,
+    "tie_word_embeddings": true,
+    "tokenizer_class": null,
+    "top_k": 50,
+    "top_p": 1.0,
+    "torch_dtype": null,
+    "torchscript": false,
+    "transformers_version": "4.34.0",
+    "trigger_token": "[EDIT]",
+    "trigger_token_id": 92568,
+    "typical_p": 1.0,
+    "use_bfloat16": false
+  },
+  "l_hidden_size": 6144,
+  "llm_config": {
+    "_name_or_path": "pretrained/internlm2-chat-20b/",
+    "add_cross_attention": false,
+    "architectures": [
+      "InternLM2ForCausalLM"
+    ],
+    "attention_bias": false,
+    "attn_implementation": "flash_attention_2",
+    "auto_map": {
+      "AutoConfig": "configuration_internlm2.InternLM2Config",
+      "AutoModel": "modeling_internlm2.InternLM2ForCausalLM",
+      "AutoModelForCausalLM": "modeling_internlm2.InternLM2ForCausalLM"
+    },
+    "bad_words_ids": null,
+    "begin_suppress_tokens": null,
+    "bias": false,
+    "bos_token_id": 1,
+    "chunk_size_feed_forward": 0,
+    "cross_attention_hidden_size": null,
+    "decoder_start_token_id": null,
+    "diversity_penalty": 0.0,
+    "do_sample": false,
+    "early_stopping": false,
+    "encoder_no_repeat_ngram_size": 0,
+    "eos_token_id": 2,
+    "exponential_decay_length_penalty": null,
+    "finetuning_task": null,
+    "forced_bos_token_id": null,
+    "forced_eos_token_id": null,
+    "hidden_act": "silu",
+    "hidden_size": 6144,
+    "id2label": {
+      "0": "LABEL_0",
+      "1": "LABEL_1"
+    },
+    "initializer_range": 0.02,
+    "intermediate_size": 16384,
+    "is_decoder": false,
+    "is_encoder_decoder": false,
+    "label2id": {
+      "LABEL_0": 0,
+      "LABEL_1": 1
+    },
+    "length_penalty": 1.0,
+    "max_length": 20,
+    "max_position_embeddings": 32768,
+    "min_length": 0,
+    "model_type": "llama",
+    "no_repeat_ngram_size": 0,
+    "num_attention_heads": 48,
+    "num_beam_groups": 1,
+    "num_beams": 1,
+    "num_hidden_layers": 48,
+    "num_key_value_heads": 8,
+    "num_return_sequences": 1,
+    "output_attentions": false,
+    "output_hidden_states": false,
+    "output_scores": false,
+    "pad_token_id": 2,
+    "prefix": null,
+    "pretraining_tp": 1,
+    "problem_type": null,
+    "pruned_heads": {},
+    "remove_invalid_values": false,
+    "repetition_penalty": 1.0,
+    "return_dict": true,
+    "return_dict_in_generate": false,
+    "rms_norm_eps": 1e-05,
+    "rope_scaling": {
+      "factor": 3.0,
+      "type": "dynamic"
+    },
+    "rope_theta": 1000000,
+    "sep_token_id": null,
+    "suppress_tokens": null,
+    "task_specific_params": null,
+    "temperature": 1.0,
+    "tf_legacy_loss": false,
+    "tie_encoder_decoder": false,
+    "tie_word_embeddings": false,
+    "tokenizer_class": null,
+    "top_k": 50,
+    "top_p": 1.0,
+    "torch_dtype": "bfloat16",
+    "torchscript": false,
+    "transformers_version": "4.34.0",
+    "typical_p": 1.0,
+    "use_bfloat16": false,
+    "use_cache": true,
+    "vocab_size": 92577
+  },
+  "model_type": "visionllmv2",
+  "num_embs": 4,
+  "num_embs_gen": 64,
+  "pretrained_vl_bridge": null,
+  "sd_config": {
+    "_name_or_path": "visionllmv2/model/stable_diffusion/sd.json",
+    "add_cross_attention": false,
+    "architectures": [
+      "StableDiffusionWithLLMEmbConfig"
+    ],
+    "bad_words_ids": null,
+    "begin_suppress_tokens": null,
+    "bos_token_id": null,
+    "cfg_drop_rate": 0.1,
+    "cfg_scale": 7.5,
+    "chunk_size_feed_forward": 0,
+    "cross_attention_hidden_size": null,
+    "decoder_start_token_id": null,
+    "diversity_penalty": 0.0,
+    "do_sample": false,
+    "early_stopping": false,
+    "embed_tokens": {
+      "emb": "[EMB]",
+      "emb2": "[EMB2]",
+      "emb3": "[EMB3]",
+      "emb4": "[EMB4]",
+      "emb5": "[EMB5]",
+      "emb6": "[EMB6]",
+      "emb7": "[EMB7]",
+      "emb8": "[EMB8]"
+    },
+    "encoder_no_repeat_ngram_size": 0,
+    "eos_token_id": null,
+    "exponential_decay_length_penalty": null,
+    "finetuning_task": null,
+    "forced_bos_token_id": null,
+    "forced_eos_token_id": null,
+    "id2label": {
+      "0": "LABEL_0",
+      "1": "LABEL_1"
+    },
+    "is_decoder": false,
+    "is_encoder_decoder": false,
+    "label2id": {
+      "LABEL_0": 0,
+      "LABEL_1": 1
+    },
+    "length_penalty": 1.0,
+    "llm_hidden_size": 6144,
+    "max_length": 20,
+    "min_length": 0,
+    "model_type": "stable_diffusion_with_llm_emb",
+    "no_repeat_ngram_size": 0,
+    "num_beam_groups": 1,
+    "num_beams": 1,
+    "num_decoder_layers": 1,
+    "num_embed_tokens": 64,
+    "num_encoder_layers": 1,
+    "num_queries": 77,
+    "num_return_sequences": 1,
+    "output_attentions": false,
+    "output_hidden_states": false,
+    "output_scores": false,
+    "pad_token_id": null,
+    "prefix": null,
+    "problem_type": null,
+    "pruned_heads": {},
+    "remove_invalid_values": false,
+    "repetition_penalty": 1.0,
+    "return_dict": true,
+    "return_dict_in_generate": false,
+    "sd_hidden_size": 768,
+    "sd_model_id": "checkpoints/stable-diffusion-v1-5",
+    "sep_token_id": null,
+    "suppress_tokens": null,
+    "task_specific_params": null,
+    "temperature": 1.0,
+    "tf_legacy_loss": false,
+    "tie_encoder_decoder": false,
+    "tie_word_embeddings": true,
+    "tokenizer_class": null,
+    "top_k": 50,
+    "top_p": 1.0,
+    "torch_dtype": null,
+    "torchscript": false,
+    "transformers_version": "4.34.0",
+    "trigger_token": "[GEN]",
+    "trigger_token_id": 92567,
+    "typical_p": 1.0,
+    "use_bfloat16": false
+  },
+  "torch_dtype": "float32",
+  "transformers_version": null,
+  "unipose_config": null,
+  "use_gdino": false,
+  "use_ip2p": true,
+  "use_llm_lora": false,
+  "use_pixelshuffle": true,
+  "use_region_encoder": false,
+  "use_sd": true,
+  "use_unipose": false,
+  "v_hidden_size": 3200,
+  "vis_encoder_config": {
+    "_name_or_path": "OpenGVLab/InternViT-6B-448px-V1-5",
+    "add_cross_attention": false,
+    "architectures": [
+      "InternVisionModel"
+    ],
+    "attention_dropout": 0.0,
+    "auto_map": {
+      "AutoConfig": "configuration_intern_vit.InternVisionConfig",
+      "AutoModel": "modeling_intern_vit.InternVisionModel"
+    },
+    "bad_words_ids": null,
+    "begin_suppress_tokens": null,
+    "bos_token_id": null,
+    "chunk_size_feed_forward": 0,
+    "cross_attention_hidden_size": null,
+    "decoder_start_token_id": null,
+    "diversity_penalty": 0.0,
+    "do_sample": false,
+    "drop_path_rate": 0.4,
+    "dropout": 0.0,
+    "early_stopping": false,
+    "encoder_no_repeat_ngram_size": 0,
+    "eos_token_id": null,
+    "exponential_decay_length_penalty": null,
+    "finetuning_task": null,
+    "forced_bos_token_id": null,
+    "forced_eos_token_id": null,
+    "hidden_act": "gelu",
+    "hidden_size": 3200,
+    "id2label": {
+      "0": "LABEL_0",
+      "1": "LABEL_1"
+    },
+    "image_size": 448,
+    "initializer_factor": 0.1,
+    "initializer_range": 1e-10,
+    "intermediate_size": 12800,
+    "is_decoder": false,
+    "is_encoder_decoder": false,
+    "label2id": {
+      "LABEL_0": 0,
+      "LABEL_1": 1
+    },
+    "layer_norm_eps": 1e-06,
+    "length_penalty": 1.0,
+    "max_length": 20,
+    "min_length": 0,
+    "model_type": "clip_vision_model",
+    "no_repeat_ngram_size": 0,
+    "norm_type": "rms_norm",
+    "num_attention_heads": 25,
+    "num_beam_groups": 1,
+    "num_beams": 1,
+    "num_channels": 3,
+    "num_hidden_layers": 45,
+    "num_return_sequences": 1,
+    "output_attentions": false,
+    "output_hidden_states": false,
+    "output_scores": false,
+    "pad_token_id": null,
+    "patch_size": 14,
+    "prefix": null,
+    "problem_type": null,
+    "projection_dim": 512,
+    "pruned_heads": {},
+    "qk_normalization": true,
+    "qkv_bias": false,
+    "remove_invalid_values": false,
+    "repetition_penalty": 1.0,
+    "return_dict": true,
+    "return_dict_in_generate": false,
+    "sep_token_id": null,
+    "suppress_tokens": null,
+    "task_specific_params": null,
+    "temperature": 1.0,
+    "tf_legacy_loss": false,
+    "tie_encoder_decoder": false,
+    "tie_word_embeddings": true,
+    "tokenizer_class": null,
+    "top_k": 50,
+    "top_p": 1.0,
+    "torch_dtype": "bfloat16",
+    "torchscript": false,
+    "transformers_version": "4.34.0",
+    "typical_p": 1.0,
+    "use_bfloat16": true,
+    "use_flash_attn": true
+  },
+  "vis_output_layer": -1,
+  "vl_bridge_type": "internvl_mlp"
+}
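
config.json nests one sub-config per component: the InternLM2-20B language model (llm_config), the InternViT-6B vision encoder (vis_encoder_config), and the Stable Diffusion / InstructPix2Pix decoders (sd_config, ip2p_config), with the use_* flags gating each head. A hedged sketch for inspecting the composition without loading any weights; plain json is used here because the top-level model_type "visionllmv2" is a custom type that stock transformers AutoConfig does not know.

import json

with open('config.json') as f:
    cfg = json.load(f)

# Heads enabled in this checkpoint: sd and ip2p are on, gdino/unipose are off.
print({k: v for k, v in cfg.items() if k.startswith('use_')})

# The LLM hidden size (6144) must match llm_hidden_size in the diffusion
# sub-configs, since [EMB] hidden states are projected into those decoders.
assert cfg['llm_config']['hidden_size'] == cfg['sd_config']['llm_hidden_size']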
generation_config.json ADDED
@@ -0,0 +1,4 @@
+{
+  "_from_model_config": true,
+  "transformers_version": "4.34.0"
+}
pytorch_model-00001-of-00011.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a06f7f2016a04582900a3017802daaef053e7a56b07181914ed0a2b4cf7df763
+size 9977168098
pytorch_model-00002-of-00011.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2cc8e4a61cbe7d43b2c8071a52383aec637803eb9b7c630888fcff8b454e4ca3
+size 9874535377
pytorch_model-00003-of-00011.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1628060796640dd20589beadbde13508b45e067f787f038bfc9553ea5929017b
+size 9602976555
pytorch_model-00004-of-00011.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:875edf0b81e85fce994fbe808448f5d2227ed38029f244cd19790e3ab2a4e7ff
+size 9764649377
pytorch_model-00005-of-00011.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:10293ca02bab4333073d56b5eb28a9d5d1c444d70766b132aa1611dee8f1f1d8
+size 9764649329
pytorch_model-00006-of-00011.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:90adc0c9ec38b9e78186c0e02fa55270d881393ce24c8eba66fb9bd293cc0398
+size 9966026123
pytorch_model-00007-of-00011.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:241015133f48b5622d6876421e2a19574919fcaf1ad06a9f8179effe07470259
+size 9915644659
pytorch_model-00008-of-00011.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:56fa3bd42f1c5e4d658633bf83d7e6415b4939931ae165bc3564a2fa296e9f86
+size 9764649329
pytorch_model-00009-of-00011.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b5c7829dfed10e6c71487fb87c3ce75db8bde769cbb04657acc312febdb81185
+size 9966026123
pytorch_model-00010-of-00011.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0d9860089c4c179303a3a6b33e0c8d87760a6c46d9ccdfb5c465f4ec9bd02d01
+size 9915644659
pytorch_model-00011-of-00011.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f851cae84592ef57cf90cee5100de3dd623dff537274cbbeb37a8beaa7efeafb
+size 7990731575
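
The eleven pytorch_model-*.bin entries above are git-lfs pointer files; the actual weights (roughly 107 GB in total, per the recorded sizes) are fetched separately. A short sketch, assuming a shard has already been downloaded, for verifying it against the sha256 oid recorded in its pointer:

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file in 1 MiB chunks so a ~10 GB shard never sits in memory.
    digest = hashlib.sha256()
    with open(path, 'rb') as f:
        while chunk := f.read(chunk_size):
            digest.update(chunk)
    return digest.hexdigest()

# oid taken from the pointer for shard 1 of 11 above.
expected = 'a06f7f2016a04582900a3017802daaef053e7a56b07181914ed0a2b4cf7df763'
assert sha256_of('pytorch_model-00001-of-00011.bin') == expected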
pytorch_model.bin.index.json ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1,50 @@
+{
+  "additional_special_tokens": [
+    "<unk>",
+    "<s>",
+    "</s>",
+    "<|plugin|>",
+    "<|interpreter|>",
+    "<|action_end|>",
+    "<|action_start|>",
+    "<|im_end|>",
+    "<|im_start|>",
+    "<img>",
+    "</img>",
+    "<IMG_CONTEXT>",
+    "<quad>",
+    "</quad>",
+    "<ref>",
+    "</ref>",
+    "<box>",
+    "</box>",
+    "[PAD]",
+    "<image>",
+    "<im_patch>",
+    "<region>",
+    "<reg>",
+    "</reg>",
+    "<det>",
+    "</det>",
+    "<grd>",
+    "</grd>",
+    "[DET]",
+    "[GRD]",
+    "[SEG]",
+    "[POSE]",
+    "[GEN]",
+    "[EDIT]",
+    "[EMB]",
+    "[EMB2]",
+    "[EMB3]",
+    "[EMB4]",
+    "[EMB5]",
+    "[EMB6]",
+    "[EMB7]",
+    "[EMB8]"
+  ],
+  "bos_token": "<s>",
+  "eos_token": "</s>",
+  "pad_token": "<unk>",
+  "unk_token": "<unk>"
+}
tokenization_internlm2.py ADDED
@@ -0,0 +1,235 @@
+# Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
+#
+# This code is based on transformers/src/transformers/models/llama/tokenization_llama.py
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tokenization classes for InternLM."""
+import os
+from shutil import copyfile
+from typing import Any, Dict, List, Optional, Tuple
+
+import sentencepiece as spm
+from transformers.tokenization_utils import PreTrainedTokenizer
+from transformers.utils import logging
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {'vocab_file': './tokenizer.model'}
+
+PRETRAINED_VOCAB_FILES_MAP = {}
+
+
+# Modified from transformers.model.llama.tokenization_llama.LlamaTokenizer
+class InternLM2Tokenizer(PreTrainedTokenizer):
+    """
+    Construct an InternLM2 tokenizer. Based on byte-level Byte-Pair-Encoding.
+
+    Args:
+        vocab_file (`str`):
+            Path to the vocabulary file.
+    """
+
+    vocab_files_names = VOCAB_FILES_NAMES
+    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+    model_input_names = ['input_ids', 'attention_mask']
+    _auto_class = 'AutoTokenizer'
+
+    def __init__(
+        self,
+        vocab_file,
+        unk_token='<unk>',
+        bos_token='<s>',
+        eos_token='</s>',
+        pad_token='</s>',
+        sp_model_kwargs: Optional[Dict[str, Any]] = None,
+        add_bos_token=True,
+        add_eos_token=False,
+        decode_with_prefix_space=False,
+        clean_up_tokenization_spaces=False,
+        **kwargs,
+    ):
+        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+        self.vocab_file = vocab_file
+        self.add_bos_token = add_bos_token
+        self.add_eos_token = add_eos_token
+        self.decode_with_prefix_space = decode_with_prefix_space
+        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+        self.sp_model.Load(vocab_file)
+        self._no_prefix_space_tokens = None
+        super().__init__(
+            bos_token=bos_token,
+            eos_token=eos_token,
+            unk_token=unk_token,
+            pad_token=pad_token,
+            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+            **kwargs,
+        )
+
+    @property
+    def no_prefix_space_tokens(self):
+        if self._no_prefix_space_tokens is None:
+            vocab = self.convert_ids_to_tokens(list(range(self.vocab_size)))
+            self._no_prefix_space_tokens = {i for i, tok in enumerate(vocab) if not tok.startswith('▁')}
+        return self._no_prefix_space_tokens
+
+    @property
+    def vocab_size(self):
+        """Returns vocab size"""
+        return self.sp_model.get_piece_size()
+
+    @property
+    def bos_token_id(self) -> Optional[int]:
+        return self.sp_model.bos_id()
+
+    @property
+    def eos_token_id(self) -> Optional[int]:
+        return self.sp_model.eos_id()
+
+    def get_vocab(self):
+        """Returns vocab as a dict"""
+        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+        vocab.update(self.added_tokens_encoder)
+        return vocab
+
+    def _tokenize(self, text):
+        """Returns a tokenized string."""
+        return self.sp_model.encode(text, out_type=str)
+
+    def _convert_token_to_id(self, token):
+        """Converts a token (str) to an id using the vocab."""
+        return self.sp_model.piece_to_id(token)
+
+    def _convert_id_to_token(self, index):
+        """Converts an index (integer) to a token (str) using the vocab."""
+        token = self.sp_model.IdToPiece(index)
+        return token
+
+    def _maybe_add_prefix_space(self, tokens, decoded):
+        if tokens and tokens[0] not in self.no_prefix_space_tokens:
+            return ' ' + decoded
+        else:
+            return decoded
+
+    def convert_tokens_to_string(self, tokens):
+        """Converts a sequence of tokens (string) into a single string."""
+        current_sub_tokens = []
+        out_string = ''
+        prev_is_special = False
+        for token in tokens:
+            # make sure that special tokens are not decoded using sentencepiece model
+            if token in self.all_special_tokens:
+                if not prev_is_special:
+                    out_string += ' '
+                out_string += self.sp_model.decode(current_sub_tokens) + token
+                prev_is_special = True
+                current_sub_tokens = []
+            else:
+                current_sub_tokens.append(token)
+                prev_is_special = False
+        out_string += self.sp_model.decode(current_sub_tokens)
+        out_string = self.clean_up_tokenization(out_string)
+        out_string = self._maybe_add_prefix_space(tokens=tokens, decoded=out_string)
+        return out_string[1:]
+
+    def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
+        """
+        Save the vocabulary and special tokens file to a directory.
+
+        Args:
+            save_directory (`str`):
+                The directory in which to save the vocabulary.
+
+        Returns:
+            `Tuple(str)`: Paths to the files saved.
+        """
+        if not os.path.isdir(save_directory):
+            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
+            return
+        out_vocab_file = os.path.join(
+            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
+        )
+
+        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+            copyfile(self.vocab_file, out_vocab_file)
+        elif not os.path.isfile(self.vocab_file):
+            with open(out_vocab_file, 'wb') as fi:
+                content_spiece_model = self.sp_model.serialized_model_proto()
+                fi.write(content_spiece_model)
+
+        return (out_vocab_file,)
+
+    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+        if self.add_bos_token:
+            bos_token_ids = [self.bos_token_id]
+        else:
+            bos_token_ids = []
+
+        output = bos_token_ids + token_ids_0
+
+        if token_ids_1 is not None:
+            output = output + token_ids_1
+
+        if self.add_eos_token:
+            output = output + [self.eos_token_id]
+
+        return output
+
+    def get_special_tokens_mask(
+        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+    ) -> List[int]:
+        """
+        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+        special tokens using the tokenizer `prepare_for_model` method.
+
+        Args:
+            token_ids_0 (`List[int]`):
+                List of IDs.
+            token_ids_1 (`List[int]`, *optional*):
+                Optional second list of IDs for sequence pairs.
+            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+                Whether or not the token list is already formatted with special tokens for the model.
+
+        Returns:
+            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+        """
+        if already_has_special_tokens:
+            return super().get_special_tokens_mask(
+                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+            )
+
+        if token_ids_1 is None:
+            return [1] + ([0] * len(token_ids_0)) + [1]
+        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
+
+    def create_token_type_ids_from_sequences(
+        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+    ) -> List[int]:
+        """
+        Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make
+        use of token type ids, therefore a list of zeros is returned.
+
+        Args:
+            token_ids_0 (`List[int]`):
+                List of IDs.
+            token_ids_1 (`List[int]`, *optional*):
+                Optional second list of IDs for sequence pairs.
+
+        Returns:
+            `List[int]`: List of zeros.
+        """
+        eos = [self.eos_token_id]
+
+        if token_ids_1 is None:
+            return len(token_ids_0 + eos) * [0]
+        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
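
A short usage sketch for the class above, assuming the tokenizer.model from this commit sits alongside it. With add_bos_token=True (the default), every encoded sequence starts with <s> (id 1, per added_tokens.json):

# Round-trip a string through the SentencePiece-backed tokenizer.
tokenizer = InternLM2Tokenizer('tokenizer.model')
ids = tokenizer.encode('Hello, world!')
assert ids[0] == tokenizer.bos_token_id  # build_inputs_with_special_tokens prepends <s>
print(tokenizer.decode(ids))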
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f868398fc4e05ee1e8aeba95ddf18ddcc45b8bce55d5093bead5bbf80429b48b
+size 1477754
tokenizer_config.json ADDED
@@ -0,0 +1,400 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "92538": {
+      "content": "<|plugin|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "92539": {
+      "content": "<|interpreter|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "92540": {
+      "content": "<|action_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "92541": {
+      "content": "<|action_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "92542": {
+      "content": "<|im_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "92543": {
+      "content": "<|im_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "92544": {
+      "content": "<img>",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": true
+    },
+    "92545": {
+      "content": "</img>",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": true
+    },
+    "92546": {
+      "content": "<IMG_CONTEXT>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "92547": {
+      "content": "<quad>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "92548": {
+      "content": "</quad>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "92549": {
+      "content": "<ref>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "92550": {
+      "content": "</ref>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "92551": {
+      "content": "<box>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "92552": {
+      "content": "</box>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "92553": {
+      "content": "[PAD]",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": true
+    },
+    "92554": {
+      "content": "<image>",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": true
+    },
+    "92555": {
+      "content": "<im_patch>",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": true
+    },
+    "92556": {
+      "content": "<region>",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": true
+    },
+    "92557": {
+      "content": "<reg>",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": true
+    },
+    "92558": {
+      "content": "</reg>",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": true
+    },
+    "92559": {
+      "content": "<det>",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": true
+    },
+    "92560": {
+      "content": "</det>",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": true
+    },
+    "92561": {
+      "content": "<grd>",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": true
+    },
+    "92562": {
+      "content": "</grd>",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": true
+    },
+    "92563": {
+      "content": "[DET]",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": true
+    },
+    "92564": {
+      "content": "[GRD]",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": true
+    },
+    "92565": {
+      "content": "[SEG]",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": true
+    },
+    "92566": {
+      "content": "[POSE]",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": true
+    },
+    "92567": {
+      "content": "[GEN]",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": true
+    },
+    "92568": {
+      "content": "[EDIT]",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": true
+    },
+    "92569": {
+      "content": "[EMB]",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": true
+    },
+    "92570": {
+      "content": "[EMB2]",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": true
+    },
+    "92571": {
+      "content": "[EMB3]",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": true
+    },
+    "92572": {
+      "content": "[EMB4]",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": true
+    },
+    "92573": {
+      "content": "[EMB5]",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": true
+    },
+    "92574": {
+      "content": "[EMB6]",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": true
+    },
+    "92575": {
+      "content": "[EMB7]",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": true
+    },
+    "92576": {
+      "content": "[EMB8]",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "additional_special_tokens": [
+    "<unk>",
+    "<s>",
+    "</s>",
+    "<|plugin|>",
+    "<|interpreter|>",
+    "<|action_end|>",
+    "<|action_start|>",
+    "<|im_end|>",
+    "<|im_start|>",
+    "<img>",
+    "</img>",
+    "<IMG_CONTEXT>",
+    "<quad>",
+    "</quad>",
+    "<ref>",
+    "</ref>",
+    "<box>",
+    "</box>",
+    "[PAD]",
+    "<image>",
+    "<im_patch>",
+    "<region>",
+    "<reg>",
+    "</reg>",
+    "<det>",
+    "</det>",
+    "<grd>",
+    "</grd>",
+    "[DET]",
+    "[GRD]",
+    "[SEG]",
+    "[POSE]",
+    "[GEN]",
+    "[EDIT]",
+    "[EMB]",
+    "[EMB2]",
+    "[EMB3]",
+    "[EMB4]",
+    "[EMB5]",
+    "[EMB6]",
+    "[EMB7]",
+    "[EMB8]"
+  ],
+  "auto_map": {
+    "AutoTokenizer": [
+      "tokenization_internlm2.InternLM2Tokenizer",
+      null
+    ]
+  },
+  "bos_token": "<s>",
+  "chat_template": "{{ bos_token }}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "</s>",
+  "model_max_length": 4096,
+  "pad_token": "<unk>",
+  "padding_side": "right",
+  "tokenizer_class": "InternLM2Tokenizer",
+  "tokenizer_file": null,
+  "unk_token": "<unk>"
+}
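
The chat_template above implements the ChatML-style <|im_start|>/<|im_end|> turn format. A hedged sketch of rendering it with the tokenizer loaded earlier (apply_chat_template exists from transformers 4.34, matching the transformers_version recorded in this commit):

# Render a single user turn into the prompt format the model was trained on.
messages = [{'role': 'user', 'content': 'Describe this image.'}]
prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
# -> '<s><|im_start|>user\nDescribe this image.<|im_end|>\n<|im_start|>assistant\n'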