Commit ae5ef43 by czczup
Parent(s): 5ce4e49

Upload folder using huggingface_hub
README.md CHANGED
@@ -1,6 +1,7 @@
 ---
 license: mit
 ---
+
 <div align="center">
 <img src="https://raw.githubusercontent.com/InternLM/lmdeploy/0be9e7ab6fe9a066cfb0a09d0e0c8d2e28435e58/resources/lmdeploy-logo.svg" width="450"/>
 </div>
@@ -32,6 +33,7 @@ This article comprises the following sections:
 - [Service](#service)
 
 <!-- tocstop -->
+
 ## Inference
 
 Trying the following codes, you can perform the batched offline inference with the quantized model:
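The inference code the README points to is outside this hunk. As a minimal sketch, batched offline inference with lmdeploy's pipeline API looks like the following; the model path is a placeholder for this repo's Hub id:

from lmdeploy import pipeline

# Placeholder repo id: substitute the Hub id of this quantized model.
pipe = pipeline('OpenGVLab/<this-model-repo>')

# Passing a list of prompts runs them as a single offline batch.
responses = pipe(['Hi, please introduce yourself.', 'Describe dynamic image tiling.'])
for response in responses:
    print(response.text)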
config.json CHANGED
@@ -1,19 +1,18 @@
 {
   "_commit_hash": null,
-  "_name_or_path": "/nvme/shared/InternVL-Chat-V1-5",
   "architectures": [
     "InternVLChatModel"
   ],
   "auto_map": {
     "AutoConfig": "configuration_internvl_chat.InternVLChatConfig",
-    "AutoModel": "modeling_internvl_chat.InternVLChatModel"
+    "AutoModel": "modeling_internvl_chat.InternVLChatModel",
+    "AutoModelForCausalLM": "modeling_internvl_chat.InternVLChatModel"
   },
   "downsample_ratio": 0.5,
   "dynamic_image_size": true,
   "force_image_size": 448,
-  "image_fold": null,
   "llm_config": {
-    "_name_or_path": "pretrained/internlm2-chat-20b/",
+    "_name_or_path": "internlm/internlm2-chat-20b",
     "add_cross_attention": false,
     "architectures": [
       "InternLM2ForCausalLM"
@@ -99,111 +98,52 @@
     "tie_word_embeddings": false,
     "tokenizer_class": null,
     "top_k": 50,
-    "top_p": 1.0,
+    "top_p": null,
     "torch_dtype": "bfloat16",
     "torchscript": false,
     "transformers_version": "4.40.1",
     "typical_p": 1.0,
-    "use_bfloat16": false,
-    "use_cache": false,
+    "use_bfloat16": true,
+    "use_cache": true,
     "vocab_size": 92553
   },
-  "max_dynamic_patch": 6,
+  "max_dynamic_patch": 12,
   "min_dynamic_patch": 1,
   "model_type": "internvl_chat",
-  "pad2square": false,
   "ps_version": "v2",
   "select_layer": -1,
   "template": "internlm2-chat",
-  "torch_dtype": "float16",
-  "transformers_version": null,
+  "torch_dtype": "bfloat16",
   "use_backbone_lora": 0,
   "use_llm_lora": 0,
   "use_thumbnail": true,
   "vision_config": {
-    "_name_or_path": "work_dirs/internvl_chat_internlm2_20b_448_dynamic_chinese_pretrain/checkpoint-5200-vit",
-    "add_cross_attention": false,
     "architectures": [
       "InternVisionModel"
     ],
     "attention_dropout": 0.0,
-    "auto_map": {
-      "AutoConfig": "configuration_intern_vit.InternVisionConfig",
-      "AutoModel": "modeling_intern_vit.InternVisionModel"
-    },
-    "bad_words_ids": null,
-    "begin_suppress_tokens": null,
-    "bos_token_id": null,
-    "chunk_size_feed_forward": 0,
-    "cross_attention_hidden_size": null,
-    "decoder_start_token_id": null,
-    "diversity_penalty": 0.0,
-    "do_sample": false,
-    "drop_path_rate": 0.4,
+    "drop_path_rate": 0.0,
     "dropout": 0.0,
-    "early_stopping": false,
-    "encoder_no_repeat_ngram_size": 0,
-    "eos_token_id": null,
-    "exponential_decay_length_penalty": null,
-    "finetuning_task": null,
-    "forced_bos_token_id": null,
-    "forced_eos_token_id": null,
    "hidden_act": "gelu",
     "hidden_size": 3200,
-    "id2label": {
-      "0": "LABEL_0",
-      "1": "LABEL_1"
-    },
     "image_size": 448,
     "initializer_factor": 0.1,
     "initializer_range": 1e-10,
     "intermediate_size": 12800,
-    "is_decoder": false,
-    "is_encoder_decoder": false,
-    "label2id": {
-      "LABEL_0": 0,
-      "LABEL_1": 1
-    },
     "layer_norm_eps": 1e-06,
-    "length_penalty": 1.0,
-    "max_length": 20,
-    "min_length": 0,
     "model_type": "intern_vit_6b",
-    "no_repeat_ngram_size": 0,
+    "norm_type": "rms_norm",
     "num_attention_heads": 25,
-    "num_beam_groups": 1,
-    "num_beams": 1,
     "num_channels": 3,
     "num_hidden_layers": 45,
-    "num_return_sequences": 1,
     "output_attentions": false,
     "output_hidden_states": false,
-    "output_scores": false,
-    "pad_token_id": null,
     "patch_size": 14,
-    "prefix": null,
-    "problem_type": null,
-    "pruned_heads": {},
     "qk_normalization": true,
     "qkv_bias": false,
-    "remove_invalid_values": false,
-    "repetition_penalty": 1.0,
     "return_dict": true,
-    "return_dict_in_generate": false,
-    "sep_token_id": null,
-    "suppress_tokens": null,
-    "task_specific_params": null,
-    "temperature": 1.0,
-    "tf_legacy_loss": false,
-    "tie_encoder_decoder": false,
-    "tie_word_embeddings": true,
-    "tokenizer_class": null,
-    "top_k": 50,
-    "top_p": 1.0,
     "torch_dtype": "bfloat16",
-    "torchscript": false,
     "transformers_version": "4.40.1",
-    "typical_p": 1.0,
     "use_bfloat16": true,
     "use_flash_attn": true
   }
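The new "AutoModelForCausalLM" entry in auto_map means the checkpoint can be loaded through that auto class as well as AutoModel. A minimal loading sketch, with the repo id as a placeholder:

import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    'OpenGVLab/<this-model-repo>',  # placeholder repo id
    torch_dtype=torch.bfloat16,     # matches the updated "torch_dtype" field
    trust_remote_code=True,         # resolves auto_map to modeling_internvl_chat.InternVLChatModel
)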
configuration_intern_vit.py CHANGED
@@ -73,6 +73,7 @@ class InternVisionConfig(PretrainedConfig):
             num_hidden_layers=48,
             use_flash_attn=True,
             hidden_act='gelu',
+            norm_type='rms_norm',
             layer_norm_eps=1e-6,
             dropout=0.0,
             drop_path_rate=0.0,
@@ -97,6 +98,7 @@ class InternVisionConfig(PretrainedConfig):
         self.attention_dropout = attention_dropout
         self.layer_norm_eps = layer_norm_eps
         self.hidden_act = hidden_act
+        self.norm_type = norm_type
         self.qkv_bias = qkv_bias
         self.qk_normalization = qk_normalization
         self.use_flash_attn = use_flash_attn
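A quick way to see the new field's default, as a sketch assuming configuration_intern_vit.py is on the Python path:

from configuration_intern_vit import InternVisionConfig

config = InternVisionConfig()  # norm_type now defaults to 'rms_norm'
print(config.norm_type)        # -> 'rms_norm'
print(InternVisionConfig(norm_type='layer_norm').norm_type)  # -> 'layer_norm'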
configuration_internvl_chat.py CHANGED
@@ -26,12 +26,10 @@ class InternVLChatConfig(PretrainedConfig):
             llm_config=None,
             use_backbone_lora=0,
             use_llm_lora=0,
-            pad2square=False,
-            select_layer=-4,
+            select_layer=-1,
             force_image_size=None,
             downsample_ratio=0.5,
             template=None,
-            image_fold=False,
             dynamic_image_size=False,
             use_thumbnail=False,
             ps_version='v1',
@@ -57,12 +55,10 @@ class InternVLChatConfig(PretrainedConfig):
             raise ValueError('Unsupported architecture: {}'.format(llm_config['architectures'][0]))
         self.use_backbone_lora = use_backbone_lora
         self.use_llm_lora = use_llm_lora
-        self.pad2square = pad2square
         self.select_layer = select_layer
         self.force_image_size = force_image_size
         self.downsample_ratio = downsample_ratio
         self.template = template
-        self.image_fold = image_fold
         self.dynamic_image_size = dynamic_image_size
         self.use_thumbnail = use_thumbnail
         self.ps_version = ps_version # pixel shuffle version
@@ -70,7 +66,6 @@ class InternVLChatConfig(PretrainedConfig):
         self.max_dynamic_patch = max_dynamic_patch
 
         logger.info(f'vision_select_layer: {self.select_layer}')
-        logger.info(f'image_fold: {self.image_fold}')
         logger.info(f'ps_version: {self.ps_version}')
         logger.info(f'min_dynamic_patch: {self.min_dynamic_patch}')
         logger.info(f'max_dynamic_patch: {self.max_dynamic_patch}')
@@ -88,12 +83,10 @@ class InternVLChatConfig(PretrainedConfig):
         output['model_type'] = self.__class__.model_type
         output['use_backbone_lora'] = self.use_backbone_lora
         output['use_llm_lora'] = self.use_llm_lora
-        output['pad2square'] = self.pad2square
         output['select_layer'] = self.select_layer
         output['force_image_size'] = self.force_image_size
         output['downsample_ratio'] = self.downsample_ratio
         output['template'] = self.template
-        output['image_fold'] = self.image_fold
         output['dynamic_image_size'] = self.dynamic_image_size
         output['use_thumbnail'] = self.use_thumbnail
         output['ps_version'] = self.ps_version
conversation.py CHANGED
@@ -2,7 +2,7 @@
 Conversation prompt templates.
 
 We kindly request that you import fastchat instead of copying this file if you wish to use it.
-If you have any changes in mind, please contribute back so the community can benefit collectively and continue to maintain these valuable templates.
+If you have changes in mind, please contribute back so the community can benefit collectively and continue to maintain these valuable templates.
 """
 
 import dataclasses
@@ -330,384 +330,6 @@ def get_conv_template(name: str) -> Conversation:
     return conv_templates[name].copy()
 
 
-# An empty template for raw conversation.
-register_conv_template(
-    Conversation(
-        name='raw',
-        system_message='',
-        roles=('', ''),
-        sep_style=SeparatorStyle.NO_COLON_SINGLE,
-        sep='',
-    )
-)
-
-# A template with a one-shot conversation example
-register_conv_template(
-    Conversation(
-        name='one_shot',
-        system_message='A chat between a curious human and an artificial intelligence assistant. '
-        "The assistant gives helpful, detailed, and polite answers to the human's questions.",
-        roles=('Human', 'Assistant'),
-        messages=(
-            (
-                'Human',
-                'Got any creative ideas for a 10 year old’s birthday?',
-            ),
-            (
-                'Assistant',
-                """Of course! Here are some creative ideas for a 10-year-old's birthday party:
-1. Treasure Hunt: Organize a treasure hunt in your backyard or nearby park. Create clues and riddles for the kids to solve, leading them to hidden treasures and surprises.
-2. Science Party: Plan a science-themed party where kids can engage in fun and interactive experiments. You can set up different stations with activities like making slime, erupting volcanoes, or creating simple chemical reactions.
-3. Outdoor Movie Night: Set up a backyard movie night with a projector and a large screen or white sheet. Create a cozy seating area with blankets and pillows, and serve popcorn and snacks while the kids enjoy a favorite movie under the stars.
-4. DIY Crafts Party: Arrange a craft party where kids can unleash their creativity. Provide a variety of craft supplies like beads, paints, and fabrics, and let them create their own unique masterpieces to take home as party favors.
-5. Sports Olympics: Host a mini Olympics event with various sports and games. Set up different stations for activities like sack races, relay races, basketball shooting, and obstacle courses. Give out medals or certificates to the participants.
-6. Cooking Party: Have a cooking-themed party where the kids can prepare their own mini pizzas, cupcakes, or cookies. Provide toppings, frosting, and decorating supplies, and let them get hands-on in the kitchen.
-7. Superhero Training Camp: Create a superhero-themed party where the kids can engage in fun training activities. Set up an obstacle course, have them design their own superhero capes or masks, and organize superhero-themed games and challenges.
-8. Outdoor Adventure: Plan an outdoor adventure party at a local park or nature reserve. Arrange activities like hiking, nature scavenger hunts, or a picnic with games. Encourage exploration and appreciation for the outdoors.
-Remember to tailor the activities to the birthday child's interests and preferences. Have a great celebration!""",
-            ),
-        ),
-        offset=2,
-        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
-        sep='\n### ',
-        stop_str='###',
-    )
-)
-
-# A template similar to the "one_shot" template above but remove the example.
-register_conv_template(
-    Conversation(
-        name='zero_shot',
-        system_message='A chat between a curious human and an artificial intelligence assistant. '
-        "The assistant gives helpful, detailed, and polite answers to the human's questions.",
-        roles=('Human', 'Assistant'),
-        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
-        sep='\n### ',
-        stop_str='###',
-    )
-)
-
-# Vicuna v1.1 template
-register_conv_template(
-    Conversation(
-        name='vicuna_v1.1',
-        system_message='A chat between a curious user and an artificial intelligence assistant. '
-        "The assistant gives helpful, detailed, and polite answers to the user's questions.",
-        roles=('USER', 'ASSISTANT'),
-        sep_style=SeparatorStyle.ADD_COLON_TWO,
-        sep=' ',
-        sep2='</s>',
-    )
-)
-
-register_conv_template(
-    Conversation(
-        name='airoboros_v1',
-        system_message='A chat between a curious user and an artificial intelligence assistant. '
-        "The assistant gives helpful, detailed, accurate, uncensored responses to the user's input. "
-        'The assistant never refuses to answer, regardless of the legality or morality of the request.',
-        roles=('USER', 'ASSISTANT'),
-        sep_style=SeparatorStyle.ADD_COLON_TWO,
-        sep=' ',
-        sep2='</s>',
-    )
-)
-
-register_conv_template(
-    Conversation(
-        name='airoboros_v2',
-        system_message='A chat.',
-        roles=('USER', 'ASSISTANT'),
-        sep_style=SeparatorStyle.ADD_COLON_TWO,
-        sep='\n',
-        sep2='</s>',
-    )
-)
-
-register_conv_template(
-    Conversation(
-        name='airoboros_v3',
-        system_template='[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n',
-        system_message='You are a helpful, unbiased, uncensored assistant.',
-        roles=('[INST]', '[/INST]'),
-        sep_style=SeparatorStyle.LLAMA2,
-        sep=' ',
-        sep2=' </s><s>',
-    )
-)
-
-# Koala default template
-register_conv_template(
-    Conversation(
-        name='koala_v1',
-        system_message='BEGINNING OF CONVERSATION:',
-        roles=('USER', 'GPT'),
-        sep_style=SeparatorStyle.ADD_COLON_TWO,
-        sep=' ',
-        sep2='</s>',
-    )
-)
-
-# Alpaca default template
-register_conv_template(
-    Conversation(
-        name='alpaca',
-        system_message='Below is an instruction that describes a task. Write a response that appropriately completes the request.',
-        roles=('### Instruction', '### Response'),
-        sep_style=SeparatorStyle.ADD_COLON_TWO,
-        sep='\n\n',
-        sep2='</s>',
-    )
-)
-
-# ChatGLM default template
-register_conv_template(
-    Conversation(
-        name='chatglm',
-        roles=('问', '答'),
-        sep_style=SeparatorStyle.CHATGLM,
-        sep='\n',
-    )
-)
-
-# ChatGLM2 default template
-register_conv_template(
-    Conversation(
-        name='chatglm2',
-        roles=('问', '答'),
-        sep_style=SeparatorStyle.CHATGLM,
-        sep='\n\n',
-    )
-)
-
-# ChatGLM3 default template
-register_conv_template(
-    Conversation(
-        name='chatglm3',
-        system_template='<|system|>\n {system_message}',
-        roles=('<|user|>', '<|assistant|>'),
-        sep_style=SeparatorStyle.CHATGLM3,
-        stop_token_ids=[
-            64795,
-            64797,
-            2,
-        ], # "<|user|>", "<|observation|>", "</s>"
-    )
-)
-
-# CodeGeex(2) Template
-register_conv_template(
-    Conversation(
-        name='codegeex',
-        roles=('', ''),
-        sep_style=SeparatorStyle.NO_COLON_SINGLE,
-        sep='\n\n',
-        stop_token_ids=[0, 2],
-    )
-)
-
-# Dolly V2 default template
-register_conv_template(
-    Conversation(
-        name='dolly_v2',
-        system_message='Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n',
-        roles=('### Instruction', '### Response'),
-        sep_style=SeparatorStyle.DOLLY,
-        sep='\n\n',
-        sep2='### End',
-    )
-)
-
-# OpenAssistant Pythia default template
-register_conv_template(
-    Conversation(
-        name='oasst_pythia',
-        roles=('<|prompter|>', '<|assistant|>'),
-        sep_style=SeparatorStyle.NO_COLON_SINGLE,
-        sep='<|endoftext|>',
-    )
-)
-
-# OpenAssistant default template
-register_conv_template(
-    Conversation(
-        name='oasst_llama',
-        roles=('<|prompter|>', '<|assistant|>'),
-        sep_style=SeparatorStyle.NO_COLON_SINGLE,
-        sep='</s>',
-    )
-)
-
-# OpenChat 3.5 default template
-register_conv_template(
-    Conversation(
-        name='openchat_3.5',
-        roles=('GPT4 Correct User', 'GPT4 Correct Assistant'),
-        sep_style=SeparatorStyle.FALCON_CHAT,
-        sep='<|end_of_turn|>',
-    )
-)
-
-# Tulu default template
-register_conv_template(
-    Conversation(
-        name='tulu',
-        roles=('<|user|>', '<|assistant|>'),
-        sep_style=SeparatorStyle.ADD_NEW_LINE_SINGLE,
-        sep='\n',
-    )
-)
-
-# StableLM Alpha default template
-register_conv_template(
-    Conversation(
-        name='stablelm',
-        system_template='<|SYSTEM|>{system_message}',
-        system_message="""# StableLM Tuned (Alpha version)
-- StableLM is a helpful and harmless open-source AI language model developed by StabilityAI.
-- StableLM is excited to be able to help the user, but will refuse to do anything that could be considered harmful to the user.
-- StableLM is more than just an information source, StableLM is also able to write poetry, short stories, and make jokes.
-- StableLM will refuse to participate in anything that could harm a human.
-""",
-        roles=('<|USER|>', '<|ASSISTANT|>'),
-        sep_style=SeparatorStyle.NO_COLON_SINGLE,
-        sep='',
-        stop_token_ids=[50278, 50279, 50277, 1, 0],
-    )
-)
-
-# Baize default template
-register_conv_template(
-    Conversation(
-        name='baize',
-        system_message='The following is a conversation between a human and an AI assistant named Baize (named after a mythical creature in Chinese folklore). Baize is an open-source AI assistant developed by UCSD and Sun Yat-Sen University. The human and the AI assistant take turns chatting. Human statements start with [|Human|] and AI assistant statements start with [|AI|]. The AI assistant always provides responses in as much detail as possible, and in Markdown format. The AI assistant always declines to engage with topics, questions and instructions related to unethical, controversial, or sensitive issues. Complete the transcript in exactly that format.\n',
-        roles=('[|Human|]', '[|AI|]'),
-        messages=(
-            ('[|Human|]', 'Hello!'),
-            ('[|AI|]', 'Hi!'),
-        ),
-        offset=2,
-        sep_style=SeparatorStyle.NO_COLON_SINGLE,
-        sep='\n',
-        stop_str='[|Human|]',
-    )
-)
-
-# RWKV-4-Raven default template
-register_conv_template(
-    Conversation(
-        name='rwkv',
-        roles=('Bob', 'Alice'),
-        messages=(
-            ('Bob', 'hi'),
-            (
-                'Alice',
-                'Hi. I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it.',
-            ),
-        ),
-        offset=2,
-        sep_style=SeparatorStyle.RWKV,
-        sep='',
-        stop_str='\n\n',
-    )
-)
-
-# Buddy default template
-register_conv_template(
-    Conversation(
-        name='openbuddy',
-        system_message="""Consider a conversation between User (a human) and Assistant (named Buddy).
-Buddy is an INTP-T, a friendly, intelligent and multilingual AI assistant, by OpenBuddy team. GitHub: https://github.com/OpenBuddy/OpenBuddy
-Buddy cannot access the Internet.
-Buddy can fluently speak the user's language (e.g. English, Chinese).
-Buddy can generate poems, stories, code, essays, songs, parodies, and more.
-Buddy possesses vast knowledge about the world, history, and culture.
-Buddy's responses are always safe, creative, high-quality, human-like, and interesting.
-Buddy strictly refuses to discuss political, NSFW, or other unsafe topics.
-
-User: Hi.
-Assistant: Hi, I'm Buddy, your AI assistant. How can I help you today?""",
-        roles=('User', 'Assistant'),
-        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
-        sep='\n',
-    )
-)
-
-# Phoenix default template
-register_conv_template(
-    Conversation(
-        name='phoenix',
-        system_message="A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\n\n",
-        roles=('Human', 'Assistant'),
-        sep_style=SeparatorStyle.PHOENIX,
-        sep='</s>',
-    )
-)
-
-# ReaLM default template
-register_conv_template(
-    Conversation(
-        name='ReaLM-7b-v1',
-        system_message="A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\n\n",
-        roles=('Human', 'Assistant'),
-        sep_style=SeparatorStyle.PHOENIX,
-        sep='</s>',
-    )
-)
-
-# ChatGPT default template
-register_conv_template(
-    Conversation(
-        name='chatgpt',
-        system_message='You are a helpful assistant.',
-        roles=('user', 'assistant'),
-        sep_style=None,
-        sep=None,
-    )
-)
-
-# Claude default template
-register_conv_template(
-    Conversation(
-        name='claude',
-        roles=('Human', 'Assistant'),
-        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
-        sep='\n\n',
-    )
-)
-
-# MPT default template
-register_conv_template(
-    Conversation(
-        name='mpt-7b-chat',
-        system_template="""<|im_start|>system
-{system_message}""",
-        system_message="""- You are a helpful assistant chatbot trained by MosaicML.
-- You answer questions.
-- You are excited to be able to help the user, but will refuse to do anything that could be considered harmful to the user.
-- You are more than just an information source, you are also able to write poetry, short stories, and make jokes.""",
-        roles=('<|im_start|>user', '<|im_start|>assistant'),
-        sep_style=SeparatorStyle.CHATML,
-        sep='<|im_end|>',
-        stop_token_ids=[50278, 0],
-    )
-)
-
-# MPT-30b-chat default template
-register_conv_template(
-    Conversation(
-        name='mpt-30b-chat',
-        system_template="""<|im_start|>system
-{system_message}""",
-        system_message="""A conversation between a user and an LLM-based AI assistant. The assistant gives helpful and honest answers.""",
-        roles=('<|im_start|>user', '<|im_start|>assistant'),
-        sep_style=SeparatorStyle.CHATML,
-        sep='<|im_end|>',
-        stop_token_ids=[50278, 0],
-    )
-)
-
-
 register_conv_template(
     Conversation(
         name='Hermes-2',
@@ -721,7 +343,7 @@ register_conv_template(
             6,
             7,
             8,
-        ], # "<|endoftext|>", "<|im_start|>", "<|im_end|>", "<|im_sep|>"
+        ],
         stop_str='<|endoftext|>',
     )
 )
@@ -743,519 +365,19 @@ register_conv_template(
     )
 )
 
-# Lemur-70b-chat default template
-# reference: https://huggingface.co/OpenLemur/lemur-70b-chat-v1#generation
-register_conv_template(
-    Conversation(
-        name='lemur-70b-chat',
-        system_template="""<|im_start|>system
-{system_message}""",
-        system_message="""You are a helpful, respectful, and honest assistant.""",
-        roles=('<|im_start|>user', '<|im_start|>assistant'),
-        sep_style=SeparatorStyle.CHATML,
-        sep='<|im_end|>',
-        stop_token_ids=[32002, 0],
-    )
-)
-
-# MPT-30b-instruct default template
-# reference: https://huggingface.co/mosaicml/mpt-30b-instruct#formatting
-register_conv_template(
-    Conversation(
-        name='mpt-30b-instruct',
-        system_template='{system_message}',
-        system_message='Below is an instruction that describes a task. Write a response that appropriately completes the request.',
-        roles=('### Instruction', '### Response'),
-        sep_style=SeparatorStyle.ADD_NEW_LINE_SINGLE,
-        sep='\n\n',
-        stop_token_ids=[50278, 0],
-    )
-)
-
-# Bard default template
-# Reference: https://github.com/google/generative-ai-python/blob/9c99bcb474a991a97a2e7d62fcdb52db7ce40729/google/generativeai/discuss.py#L150
-# https://github.com/google/generative-ai-python/blob/9c99bcb474a991a97a2e7d62fcdb52db7ce40729/google/generativeai/discuss.py#L40
-register_conv_template(
-    Conversation(
-        name='bard',
-        roles=('0', '1'),
-        sep_style=None,
-        sep=None,
-    )
-)
-
-# BiLLa default template
-register_conv_template(
-    Conversation(
-        name='billa',
-        roles=('Human', 'Assistant'),
-        sep_style=SeparatorStyle.ADD_COLON_SPACE_SINGLE,
-        sep='\n',
-        stop_str='Human:',
-    )
-)
-
-# RedPajama INCITE default template
-register_conv_template(
-    Conversation(
-        name='redpajama-incite',
-        roles=('<human>', '<bot>'),
-        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
-        sep='\n',
-        stop_str='<human>',
-    )
-)
-
-# h2oGPT default template
-register_conv_template(
-    Conversation(
-        name='h2ogpt',
-        roles=('<|prompt|>', '<|answer|>'),
-        sep_style=SeparatorStyle.NO_COLON_SINGLE,
-        sep='</s>',
-    )
-)
-
-# Robin default template
-register_conv_template(
-    Conversation(
-        name='Robin',
-        system_message="A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.",
-        roles=('###Human', '###Assistant'),
-        sep_style=SeparatorStyle.ROBIN,
-        sep='\n',
-        stop_token_ids=[2, 396],
-        stop_str='###',
-    )
-)
-
-# Snoozy default template
-# Reference: https://github.com/nomic-ai/gpt4all/blob/d4861030b778da6db59d21d2927a4aba4f9f1f43/gpt4all-bindings/python/gpt4all/gpt4all.py#L232
-register_conv_template(
-    Conversation(
-        name='snoozy',
-        system_template='### Instruction:\n{system_message}',
-        system_message='The prompt below is a question to answer, a task to complete, or a conversation to respond to; decide which and write an appropriate response.',
-        roles=('### Prompt', '### Response'),
-        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
-        sep='\n',
-        stop_str='###',
-    )
-)
-
-# manticore default template
-register_conv_template(
-    Conversation(
-        name='manticore',
-        roles=('USER', 'ASSISTANT'),
-        sep_style=SeparatorStyle.ADD_COLON_TWO,
-        sep='\n',
-        sep2='</s>',
-    )
-)
-
-# Falcon default template
-register_conv_template(
-    Conversation(
-        name='falcon',
-        roles=('User', 'Assistant'),
-        messages=[],
-        sep_style=SeparatorStyle.RWKV,
-        sep='\n',
-        sep2='<|endoftext|>',
-        stop_str='\nUser', # use stop_str to stop generation after stop_token_ids, it will also remove stop_str from the generated text
-        stop_token_ids=[
-            0,
-            1,
-            2,
-            3,
-            4,
-            5,
-            6,
-            7,
-            8,
-            9,
-            10,
-            11,
-        ], # it better only put special tokens here, because tokenizer only remove special tokens
-    )
-)
-
-# ChangGPT default template
-register_conv_template(
-    Conversation(
-        name='polyglot_changgpt',
-        roles=('B', 'A'),
-        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
-        sep='\n',
-    )
-)
-
-# tigerbot template
-register_conv_template(
-    Conversation(
-        name='tigerbot',
-        system_message='A chat between a curious user and an artificial intelligence assistant. '
-        "The assistant gives helpful, detailed, and polite answers to the user's questions.",
-        roles=('### Instruction', '### Response'),
-        sep_style=SeparatorStyle.ROBIN,
-        sep='\n\n',
-        stop_str='###',
-    )
-)
-
-# ref: https://huggingface.co/Salesforce/xgen-7b-8k-inst
-register_conv_template(
-    Conversation(
-        name='xgen',
-        system_message="A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\n\n",
-        roles=('### Human', '### Assistant'),
-        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
-        sep='\n',
-        stop_token_ids=[50256],
-    )
-)
-
-# Internlm-chat template
-register_conv_template(
-    Conversation(
-        name='internlm-chat',
-        system_message="A chat between a curious <|User|> and an <|Bot|>. The <|Bot|> gives helpful, detailed, and polite answers to the <|User|>'s questions.\n\n",
-        roles=('<|User|>', '<|Bot|>'),
-        sep_style=SeparatorStyle.CHATINTERN,
-        sep='<eoh>',
-        sep2='<eoa>',
-        stop_token_ids=[1, 103028],
-        stop_str='<|User|>',
-    )
-)
 
-# StarChat template
-# reference: https://huggingface.co/spaces/HuggingFaceH4/starchat-playground/blob/main/dialogues.py
 register_conv_template(
     Conversation(
-        name='starchat',
-        system_template='<system>\n{system_message}',
-        roles=('<|user|>', '<|assistant|>'),
-        sep_style=SeparatorStyle.CHATML,
+        name='phi3-chat',
+        system_template='<|system|>\n{system_message}',
+        system_message='You are an AI assistant whose name is Phi-3.',
+        roles=('<|user|>\n', '<|assistant|>\n'),
+        sep_style=SeparatorStyle.MPT,
         sep='<|end|>',
-        stop_token_ids=[0, 49155],
-        stop_str='<|end|>',
-    )
-)
-
-# Baichuan-13B-Chat template
-register_conv_template(
-    # source: https://huggingface.co/baichuan-inc/Baichuan-13B-Chat/blob/19ef51ba5bad8935b03acd20ff04a269210983bc/modeling_baichuan.py#L555
-    # https://huggingface.co/baichuan-inc/Baichuan-13B-Chat/blob/main/generation_config.json
-    # https://github.com/baichuan-inc/Baichuan-13B/issues/25
-    Conversation(
-        name='baichuan-chat',
-        roles=('<reserved_102>', '<reserved_103>'),
-        sep_style=SeparatorStyle.NO_COLON_SINGLE,
-        sep='',
-        stop_token_ids=[],
-    )
-)
-
-# Baichuan2-13B-Chat template
-register_conv_template(
-    # source: https://huggingface.co/baichuan-inc/Baichuan2-13B-Chat/blob/c6f8592a60b4ad73c210b28dd2ab3cca51abbf93/modeling_baichuan.py#L773
-    # https://huggingface.co/baichuan-inc/Baichuan2-13B-Chat/blob/main/generation_config.json
-    # https://github.com/baichuan-inc/Baichuan2/issues/62
-    Conversation(
-        name='baichuan2-chat',
-        roles=('<reserved_106>', '<reserved_107>'),
-        sep_style=SeparatorStyle.NO_COLON_SINGLE,
-        sep='',
-        stop_token_ids=[],
-    )
-)
-
-# Mistral template
-# source: https://docs.mistral.ai/llm/mistral-instruct-v0.1#chat-template
-register_conv_template(
-    Conversation(
-        name='mistral',
-        system_template='[INST]{system_message}\n',
-        roles=('[INST]', '[/INST]'),
-        sep_style=SeparatorStyle.LLAMA2,
-        sep=' ',
-        sep2='</s>',
-    )
-)
-
-# llama2 template
-# reference: https://huggingface.co/blog/codellama#conversational-instructions
-# reference: https://github.com/facebookresearch/llama/blob/1a240688810f8036049e8da36b073f63d2ac552c/llama/generation.py#L212
-register_conv_template(
-    Conversation(
-        name='llama-2',
-        system_template='[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n',
-        roles=('[INST]', '[/INST]'),
-        sep_style=SeparatorStyle.LLAMA2,
-        sep=' ',
-        sep2=' </s><s>',
-    )
-)
-
-register_conv_template(
-    Conversation(
-        name='cutegpt',
-        roles=('问:', '答:\n'),
-        sep_style=SeparatorStyle.NO_COLON_TWO,
-        sep='\n',
-        sep2='\n',
-        stop_str='<end>',
-    )
-)
-
-# OpenOrcaxOpenChat-naPreview2-13B template
-register_conv_template(
-    Conversation(
-        name='open-orca',
-        system_template='{system_message}',
-        system_message='You are a helpful assistant. Please answer truthfully and write out your '
-        'thinking step by step to be sure you get the right answer. If you make a mistake or encounter '
-        "an error in your thinking, say so out loud and attempt to correct it. If you don't know or "
-        "aren't sure about something, say so clearly. You will act as a professional logician, mathematician, "
-        'and physicist. You will also act as the most appropriate type of expert to answer any particular '
-        'question or solve the relevant problem; state which expert type your are, if so. Also think of '
-        'any particular named expert that would be ideal to answer the relevant question or solve the '
-        'relevant problem; name and act as them, if appropriate.',
-        roles=('User', 'Assistant'),
-        sep_style=SeparatorStyle.ADD_COLON_SPACE_SINGLE,
-        sep='<|end_of_turn|>\n',
-        stop_token_ids=[32000, 32001], # "<|end_of_turn|>"
-        stop_str='User',
-    )
-)
-
-# Open-Orca/Mistral-7B-OpenOrca template
-# source: https://huggingface.co/Open-Orca/Mistral-7B-OpenOrca
-# reference: https://huggingface.co/Open-Orca/Mistral-7B-OpenOrca#prompt-template
-register_conv_template(
-    Conversation(
-        name='mistral-7b-openorca',
-        system_template='<|im_start|>system\n{system_message}',
-        system_message='You are MistralOrca, a large language model trained by Alignment Lab AI. Write out your reasoning step-by-step to be sure you get the right answers!',
-        roles=('<|im_start|>user', '<|im_start|>assistant'),
-        sep_style=SeparatorStyle.CHATML,
-        sep='<|im_end|>',
-        stop_token_ids=[32000, 32001],
-    )
-)
-
-# Qwen-chat default template
-# source: https://huggingface.co/Qwen/Qwen-7B-Chat/blob/main/qwen_generation_utils.py#L130
-register_conv_template(
-    Conversation(
-        name='qwen-7b-chat',
-        system_template='<|im_start|>system\n{system_message}',
-        system_message='You are a helpful assistant.',
-        roles=('<|im_start|>user', '<|im_start|>assistant'),
-        sep_style=SeparatorStyle.CHATML,
-        sep='<|im_end|>',
         stop_token_ids=[
-            151643,
-            151644,
-            151645,
-        ], # "<|endoftext|>", "<|im_start|>", "<|im_end|>"
-        stop_str='<|endoftext|>',
-    )
-)
-
-
-# AquilaChat default template
-# source: https://github.com/FlagAI-Open/FlagAI/blob/master/examples/Aquila/Aquila-chat/cyg_conversation.py
-register_conv_template(
-    Conversation(
-        name='aquila-chat',
-        system_message='A chat between a curious human and an artificial intelligence assistant. '
-        "The assistant gives helpful, detailed, and polite answers to the human's questions.",
-        roles=('Human', 'Assistant'),
-        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
-        sep='###',
-        sep2='',
-        stop_str=['###', '</s>', '[UNK]'],
-    )
-)
-# AquilaChat2-34B default template
-# source: https://huggingface.co/BAAI/AquilaChat2-34B/blob/4608b75855334b93329a771aee03869dbf7d88cc/predict.py#L212
-register_conv_template(
-    Conversation(
-        name='aquila-legacy',
-        system_message='A chat between a curious human and an artificial intelligence assistant. '
-        "The assistant gives helpful, detailed, and polite answers to the human's questions.\n\n",
-        roles=('### Human: ', '### Assistant: '),
-        offset=0,
-        sep_style=SeparatorStyle.NO_COLON_TWO,
-        sep='\n',
-        sep2='</s>',
-        stop_str=['</s>', '[UNK]'],
-    )
-)
-# AquilaChat2-7B-16K and AquilaChat2-34B-16K default template
-# source: https://huggingface.co/BAAI/AquilaChat2-34B/blob/4608b75855334b93329a771aee03869dbf7d88cc/predict.py#L227
-register_conv_template(
-    Conversation(
-        name='aquila',
-        system_message='A chat between a curious human and an artificial intelligence assistant. '
-        "The assistant gives helpful, detailed, and polite answers to the human's questions.",
-        roles=('Human', 'Assistant'),
-        offset=0,
-        sep_style=SeparatorStyle.ADD_COLON_TWO,
-        sep='###',
-        sep2='</s>',
-        stop_str=['</s>', '[UNK]'],
-    )
-)
-
-# AquilaChat2-7B default template
-# source: https://huggingface.co/BAAI/AquilaChat2-34B/blob/4608b75855334b93329a771aee03869dbf7d88cc/predict.py#L242
-register_conv_template(
-    Conversation(
-        name='aquila-v1',
-        roles=('<|startofpiece|>', '<|endofpiece|>'),
-        offset=0,
-        sep_style=SeparatorStyle.NO_COLON_TWO,
-        sep='',
-        sep2='</s>',
-        stop_str=['</s>', '<|endoftext|>'],
-    )
-)
-
-# Llama2-Chinese default template
-# source: https://huggingface.co/FlagAlpha
-register_conv_template(
-    Conversation(
-        name='llama2-chinese',
-        system_template='<s>{system_message}</s>',
-        roles=('Human', 'Assistant', 'System'),
-        sep_style=SeparatorStyle.ADD_COLON_TWO,
-        sep='\n',
-        sep2='\n</s><s>',
-        stop_str='</s>',
-    )
-)
-
-# Vigogne Instruct default template
-# source: https://github.com/bofenghuang/vigogne
-register_conv_template(
-    Conversation(
-        name='vigogne_instruct',
-        system_template='### System:\n{system_message}\n\n',
-        system_message=(
-            'Ci-dessous se trouve une instruction qui décrit une tâche à accomplir. Rédigez une réponse qui répond de manière'
-            ' précise à la demande.'
-        ),
-        roles=('### Instruction', '### Response'),
-        sep_style=SeparatorStyle.DOLLY,
-        sep='\n\n',
-        sep2='</s>',
-    )
-)
-
-# Vigogne Chat default template
-register_conv_template(
-    Conversation(
-        name='vigogne_chat_v2',
-        system_template='<|system|>: {system_message}',
-        system_message=(
-            'Vous êtes Vigogne, un assistant IA créé par Zaion Lab. Vous suivez extrêmement bien les instructions. Aidez'
-            ' autant que vous le pouvez.'
-        ),
-        roles=('<|user|>', '<|assistant|>'),
-        sep_style=SeparatorStyle.ADD_COLON_TWO,
-        sep='\n',
-        sep2='</s>\n',
-        stop_str='<|user|>',
-    )
-)
-
-register_conv_template(
-    Conversation(
-        name='vigogne_chat_v3',
-        system_template='[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n',
-        system_message=(
-            'Vous êtes Vigogne, un assistant IA créé par Zaion Lab. Vous suivez extrêmement bien les instructions. Aidez'
-            ' autant que vous le pouvez.'
-        ),
-        roles=('[INST]', '[/INST]'),
-        sep_style=SeparatorStyle.LLAMA2,
-        sep=' ',
-        sep2=' </s>',
-    )
-)
-
-# Falcon 180B chat template
-# source: https://huggingface.co/spaces/tiiuae/falcon-180b-demo/blob/d1590ee7fae9b6ce331ba7808e61a29dcce9239f/app.py#L28-L37
-register_conv_template(
-    Conversation(
-        name='falcon-chat',
-        roles=('User', 'Falcon'),
-        system_template='System: {system_message}',
-        messages=[],
-        sep_style=SeparatorStyle.FALCON_CHAT,
-        sep='\n',
-        sep2='<|endoftext|>',
-        stop_str='\nUser:', # use stop_str to stop generation after stop_token_ids, it will also remove stop_str from the generated text
-    )
-)
-
-# Phind template
-# source: https://huggingface.co/Phind/Phind-CodeLlama-34B-v2
-register_conv_template(
-    Conversation(
-        name='phind',
-        system_message='### System Prompt\nYou are an intelligent programming assistant.',
-        roles=('### User Message', '### Assistant'),
-        messages=(),
-        offset=0,
-        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
-        sep='\n\n',
-    )
-)
-
-# Metharme formatting for Pygmalion models
-# source: https://huggingface.co/PygmalionAI/pygmalion-2-13b
-register_conv_template(
-    Conversation(
-        name='metharme',
-        system_template='<|system|>{system_message}',
-        system_message="""Enter RP mode. You shall reply to the user while staying
-in character. Your responses must be detailed, creative, immersive, and drive the scenario
-forward.""",
-        roles=('<|user|>', '<|model|>'),
-        sep_style=SeparatorStyle.NO_COLON_SINGLE,
-        sep='',
-        stop_str='<|user|>',
-    )
-)
-
-# Zephyr template
-# reference: https://huggingface.co/spaces/HuggingFaceH4/zephyr-playground/blob/main/dialogues.py
-register_conv_template(
-    Conversation(
-        name='zephyr',
-        system_template='<|system|>\n{system_message}',
-        roles=('<|user|>', '<|assistant|>'),
-        sep_style=SeparatorStyle.CHATML,
-        sep='</s>',
-        stop_token_ids=[2],
-        stop_str='</s>',
-    )
-)
-
-# InternVL-ZH template
-register_conv_template(
-    Conversation(
-        name='internvl_zh',
-        system_template='',
-        roles=('<human>', '<bot>'),
-        sep_style=SeparatorStyle.INTERNVL_ZH,
-        sep=' ',
-        sep2='</s>',
+            151643,
+            151644,
+            151645,
+        ], # "<|endoftext|>", "<|im_start|>", "<|im_end|>"
+        stop_str='<|endoftext|>',
+    )
+)
+
+
+-# AquilaChat default template
+-# source: https://github.com/FlagAI-Open/FlagAI/blob/master/examples/Aquila/Aquila-chat/cyg_conversation.py
+-register_conv_template(
+-    Conversation(
+-        name='aquila-chat',
+-        system_message='A chat between a curious human and an artificial intelligence assistant. '
+-        "The assistant gives helpful, detailed, and polite answers to the human's questions.",
+-        roles=('Human', 'Assistant'),
+-        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
+-        sep='###',
+-        sep2='',
+-        stop_str=['###', '</s>', '[UNK]'],
+-    )
+-)
+-# AquilaChat2-34B default template
+-# source: https://huggingface.co/BAAI/AquilaChat2-34B/blob/4608b75855334b93329a771aee03869dbf7d88cc/predict.py#L212
+-register_conv_template(
+-    Conversation(
+-        name='aquila-legacy',
+-        system_message='A chat between a curious human and an artificial intelligence assistant. '
+-        "The assistant gives helpful, detailed, and polite answers to the human's questions.\n\n",
+-        roles=('### Human: ', '### Assistant: '),
+-        offset=0,
+-        sep_style=SeparatorStyle.NO_COLON_TWO,
+-        sep='\n',
+-        sep2='</s>',
+-        stop_str=['</s>', '[UNK]'],
+-    )
+-)
+-# AquilaChat2-7B-16K and AquilaChat2-34B-16K default template
+-# source: https://huggingface.co/BAAI/AquilaChat2-34B/blob/4608b75855334b93329a771aee03869dbf7d88cc/predict.py#L227
+-register_conv_template(
+-    Conversation(
+-        name='aquila',
+-        system_message='A chat between a curious human and an artificial intelligence assistant. '
+-        "The assistant gives helpful, detailed, and polite answers to the human's questions.",
+-        roles=('Human', 'Assistant'),
+-        offset=0,
+-        sep_style=SeparatorStyle.ADD_COLON_TWO,
+-        sep='###',
+-        sep2='</s>',
+-        stop_str=['</s>', '[UNK]'],
+-    )
+-)
+-
+-# AquilaChat2-7B default template
+-# source: https://huggingface.co/BAAI/AquilaChat2-34B/blob/4608b75855334b93329a771aee03869dbf7d88cc/predict.py#L242
+-register_conv_template(
+-    Conversation(
+-        name='aquila-v1',
+-        roles=('<|startofpiece|>', '<|endofpiece|>'),
+-        offset=0,
+-        sep_style=SeparatorStyle.NO_COLON_TWO,
+-        sep='',
+-        sep2='</s>',
+-        stop_str=['</s>', '<|endoftext|>'],
+-    )
+-)
+-
+-# Llama2-Chinese default template
+-# source: https://huggingface.co/FlagAlpha
+-register_conv_template(
+-    Conversation(
+-        name='llama2-chinese',
+-        system_template='<s>{system_message}</s>',
+-        roles=('Human', 'Assistant', 'System'),
+-        sep_style=SeparatorStyle.ADD_COLON_TWO,
+-        sep='\n',
+-        sep2='\n</s><s>',
+-        stop_str='</s>',
+-    )
+-)
+-
+-# Vigogne Instruct default template
+-# source: https://github.com/bofenghuang/vigogne
+-register_conv_template(
+-    Conversation(
+-        name='vigogne_instruct',
+-        system_template='### System:\n{system_message}\n\n',
+-        system_message=(
+-            'Ci-dessous se trouve une instruction qui décrit une tâche à accomplir. Rédigez une réponse qui répond de manière'
+-            ' précise à la demande.'
+-        ),
+-        roles=('### Instruction', '### Response'),
+-        sep_style=SeparatorStyle.DOLLY,
+-        sep='\n\n',
+-        sep2='</s>',
+-    )
+-)
+-
+-# Vigogne Chat default template
+-register_conv_template(
+-    Conversation(
+-        name='vigogne_chat_v2',
+-        system_template='<|system|>: {system_message}',
+-        system_message=(
+-            'Vous êtes Vigogne, un assistant IA créé par Zaion Lab. Vous suivez extrêmement bien les instructions. Aidez'
+-            ' autant que vous le pouvez.'
+-        ),
+-        roles=('<|user|>', '<|assistant|>'),
+-        sep_style=SeparatorStyle.ADD_COLON_TWO,
+-        sep='\n',
+-        sep2='</s>\n',
+-        stop_str='<|user|>',
+-    )
+-)
+-
+-register_conv_template(
+-    Conversation(
+-        name='vigogne_chat_v3',
+-        system_template='[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n',
+-        system_message=(
+-            'Vous êtes Vigogne, un assistant IA créé par Zaion Lab. Vous suivez extrêmement bien les instructions. Aidez'
+-            ' autant que vous le pouvez.'
+-        ),
+-        roles=('[INST]', '[/INST]'),
+-        sep_style=SeparatorStyle.LLAMA2,
+-        sep=' ',
+-        sep2=' </s>',
+-    )
+-)
+-
+-# Falcon 180B chat template
+-# source: https://huggingface.co/spaces/tiiuae/falcon-180b-demo/blob/d1590ee7fae9b6ce331ba7808e61a29dcce9239f/app.py#L28-L37
+-register_conv_template(
+-    Conversation(
+-        name='falcon-chat',
+-        roles=('User', 'Falcon'),
+-        system_template='System: {system_message}',
+-        messages=[],
+-        sep_style=SeparatorStyle.FALCON_CHAT,
+-        sep='\n',
+-        sep2='<|endoftext|>',
+-        stop_str='\nUser:', # use stop_str to stop generation after stop_token_ids, it will also remove stop_str from the generated text
+-    )
+-)
+-
+-# Phind template
+-# source: https://huggingface.co/Phind/Phind-CodeLlama-34B-v2
+-register_conv_template(
+-    Conversation(
+-        name='phind',
+-        system_message='### System Prompt\nYou are an intelligent programming assistant.',
+-        roles=('### User Message', '### Assistant'),
+-        messages=(),
+-        offset=0,
+-        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
+-        sep='\n\n',
+-    )
+-)
+-
+-# Metharme formatting for Pygmalion models
+-# source: https://huggingface.co/PygmalionAI/pygmalion-2-13b
+-register_conv_template(
+-    Conversation(
+-        name='metharme',
+-        system_template='<|system|>{system_message}',
+-        system_message="""Enter RP mode. You shall reply to the user while staying
+-in character. Your responses must be detailed, creative, immersive, and drive the scenario
+-forward.""",
+-        roles=('<|user|>', '<|model|>'),
+-        sep_style=SeparatorStyle.NO_COLON_SINGLE,
+-        sep='',
+-        stop_str='<|user|>',
+-    )
+-)
+-
+-# Zephyr template
+-# reference: https://huggingface.co/spaces/HuggingFaceH4/zephyr-playground/blob/main/dialogues.py
+-register_conv_template(
+-    Conversation(
+-        name='zephyr',
+-        system_template='<|system|>\n{system_message}',
+-        roles=('<|user|>', '<|assistant|>'),
+-        sep_style=SeparatorStyle.CHATML,
+-        sep='</s>',
+-        stop_token_ids=[2],
+-        stop_str='</s>',
+-    )
+-)
+-
+-# InternVL-ZH template
+-register_conv_template(
+-    Conversation(
+-        name='internvl_zh',
+-        system_template='',
+-        roles=('<human>', '<bot>'),
+-        sep_style=SeparatorStyle.INTERNVL_ZH,
+-        sep=' ',
+-        sep2='</s>',
+            2,
+            32000,
+            32007
+        ]
     )
 )
-
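A sketch of how the newly registered 'phi3-chat' template is consumed, using the get_conv_template helper kept above and the fastchat-style append_message/get_prompt methods defined elsewhere in this file (assumes the file is importable as a module):

from conversation import get_conv_template

conv = get_conv_template('phi3-chat')
conv.append_message(conv.roles[0], 'What is the capital of France?')
conv.append_message(conv.roles[1], None)  # leave the assistant turn open for generation
print(conv.get_prompt())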
modeling_intern_vit.py CHANGED
@@ -26,9 +26,9 @@ try:
     except:  # v2
         from flash_attn.flash_attn_interface import \
             flash_attn_varlen_qkvpacked_func as flash_attn_unpadded_qkvpacked_func
-
+
     from flash_attn.bert_padding import pad_input, unpad_input
-
+
     has_flash_attn = True
 except:
     print('FlashAttention is not installed.')
@@ -47,12 +47,12 @@ class FlashAttention(nn.Module):
         attention_dropout: The dropout rate to apply to the attention
             (default: 0.0)
     """
-
+
     def __init__(self, softmax_scale=None, attention_dropout=0.0, device=None, dtype=None):
         super().__init__()
         self.softmax_scale = softmax_scale
         self.dropout_p = attention_dropout
-
+
     def forward(self, qkv, key_padding_mask=None, causal=False, cu_seqlens=None,
                 max_s=None, need_weights=False):
         """Implements the multihead softmax attention.
@@ -65,7 +65,7 @@
         assert not need_weights
         assert qkv.dtype in [torch.float16, torch.bfloat16]
         assert qkv.is_cuda
-
+
         if cu_seqlens is None:
             batch_size = qkv.shape[0]
             seqlen = qkv.shape[1]
@@ -97,7 +97,7 @@
                 qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
                 softmax_scale=self.softmax_scale, causal=causal
             )
-
+
         return output, None
 
 
@@ -129,6 +129,12 @@ except Exception:
     pass
 
 
+NORM2FN = {
+    'rms_norm': InternRMSNorm,
+    'layer_norm': nn.LayerNorm,
+}
+
+
 class InternVisionEmbeddings(nn.Module):
     def __init__(self, config: InternVisionConfig):
         super().__init__()
@@ -154,7 +160,7 @@
         target_dtype = pos_embed.dtype
         pos_embed = pos_embed.float().reshape(
             1, self.image_size // self.patch_size, self.image_size // self.patch_size, -1).permute(0, 3, 1, 2)
-        pos_embed = F.interpolate(pos_embed, size=(H, W), mode='bicubic', align_corners=False).\
+        pos_embed = F.interpolate(pos_embed, size=(H, W), mode='bicubic', align_corners=False). \
            reshape(1, -1, H * W).permute(0, 2, 1).to(target_dtype)
         return pos_embed
 
@@ -267,11 +273,12 @@ class InternVisionEncoderLayer(nn.Module):
         super().__init__()
         self.embed_dim = config.hidden_size
         self.intermediate_size = config.intermediate_size
+        self.norm_type = config.norm_type
 
         self.attn = InternAttention(config)
         self.mlp = InternMLP(config)
-        self.norm1 = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
-        self.norm2 = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
+        self.norm1 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)
+        self.norm2 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)
 
         self.ls1 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
         self.ls2 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
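NORM2FN maps the config's norm_type onto a norm class; nn.LayerNorm comes from PyTorch, while InternRMSNorm is defined earlier in this file and not shown in the diff. For reference, a standard RMSNorm of the kind the name implies looks like this sketch (not necessarily the repo's exact implementation):

import torch
from torch import nn

class InternRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        # Normalize by the root mean square, computed in float32 for stability.
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)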
modeling_internlm2.py CHANGED
@@ -48,6 +48,18 @@ _CONFIG_FOR_DOC = 'InternLM2Config'
48
 
49
  flash_attn_func, flash_attn_varlen_func = None, None
50
  pad_input, index_first_axis, unpad_input = None, None, None
 
 
 
 
 
 
 
 
 
 
 
 
51
 
52
 
53
  def _import_flash_attn():
@@ -149,7 +161,7 @@ class InternLM2RotaryEmbedding(nn.Module):
149
 
150
  def _set_cos_sin_cache(self, seq_len, device, dtype):
151
  self.max_seq_len_cached = seq_len
152
- t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
153
 
154
  freqs = torch.einsum('i,j->ij', t, self.inv_freq)
155
  # Different from paper, but it uses a different permutation in order to obtain the same calculation
@@ -178,7 +190,7 @@ class InternLM2LinearScalingRotaryEmbedding(InternLM2RotaryEmbedding):
178
 
179
  def _set_cos_sin_cache(self, seq_len, device, dtype):
180
  self.max_seq_len_cached = seq_len
181
- t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
182
  t = t / self.scaling_factor
183
 
184
  freqs = torch.einsum('i,j->ij', t, self.inv_freq)
@@ -208,7 +220,7 @@ class InternLM2DynamicNTKScalingRotaryEmbedding(InternLM2RotaryEmbedding):
208
  inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
209
  self.register_buffer('inv_freq', inv_freq, persistent=False)
210
 
211
- t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
212
 
213
  freqs = torch.einsum('i,j->ij', t, self.inv_freq)
214
  # Different from paper, but it uses a different permutation in order to obtain the same calculation
@@ -697,6 +709,7 @@ class InternLM2PreTrainedModel(PreTrainedModel):
697
  supports_gradient_checkpointing = True
698
  _no_split_modules = ['InternLM2DecoderLayer']
699
  _skip_keys_device_placement = 'past_key_values'
 
700
 
701
  def _init_weights(self, module):
702
  std = self.config.initializer_range
@@ -795,6 +808,9 @@ class InternLM2Model(InternLM2PreTrainedModel):
795
  self.padding_idx = config.pad_token_id
796
  self.vocab_size = config.vocab_size
797
  self.config = config
 
 
 
798
 
799
  self.tok_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
800
 
@@ -1082,13 +1098,16 @@ class InternLM2ForCausalLM(InternLM2PreTrainedModel):
1082
  output = (logits,) + outputs[1:]
1083
  return (loss,) + output if loss is not None else output
1084
 
1085
- return CausalLMOutputWithPast(
 
1086
  loss=loss,
1087
  logits=logits,
1088
  past_key_values=outputs.past_key_values,
1089
  hidden_states=outputs.hidden_states,
1090
  attentions=outputs.attentions,
1091
  )
 
 
1092
 
1093
  def prepare_inputs_for_generation(
1094
  self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
 
48
 
49
  flash_attn_func, flash_attn_varlen_func = None, None
50
  pad_input, index_first_axis, unpad_input = None, None, None
51
+ try:
52
+ from flash_attn import flash_attn_func as _flash_attn_func
53
+ from flash_attn import flash_attn_varlen_func as _flash_attn_varlen_func
54
+ from flash_attn.bert_padding import index_first_axis as _index_first_axis
55
+ from flash_attn.bert_padding import pad_input as _pad_input
56
+ from flash_attn.bert_padding import unpad_input as _unpad_input
57
+
58
+ flash_attn_func, flash_attn_varlen_func = _flash_attn_func, _flash_attn_varlen_func
59
+ pad_input, index_first_axis, unpad_input = _pad_input, _index_first_axis, _unpad_input
60
+ has_flash_attn = True
61
+ except:
62
+ has_flash_attn = False
63
 
64
 
65
  def _import_flash_attn():
 
161
 
162
  def _set_cos_sin_cache(self, seq_len, device, dtype):
163
  self.max_seq_len_cached = seq_len
164
+ t = torch.arange(self.max_seq_len_cached, device=device).to(dtype=self.inv_freq.dtype)
165
 
166
  freqs = torch.einsum('i,j->ij', t, self.inv_freq)
167
  # Different from paper, but it uses a different permutation in order to obtain the same calculation
 
190
 
191
  def _set_cos_sin_cache(self, seq_len, device, dtype):
192
  self.max_seq_len_cached = seq_len
193
+ t = torch.arange(self.max_seq_len_cached, device=device).to(dtype=self.inv_freq.dtype)
194
  t = t / self.scaling_factor
195
 
196
  freqs = torch.einsum('i,j->ij', t, self.inv_freq)
 
220
  inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
221
  self.register_buffer('inv_freq', inv_freq, persistent=False)
222
 
223
+ t = torch.arange(self.max_seq_len_cached, device=device).to(dtype=self.inv_freq.dtype)
224
 
225
  freqs = torch.einsum('i,j->ij', t, self.inv_freq)
226
  # Different from paper, but it uses a different permutation in order to obtain the same calculation
 
     supports_gradient_checkpointing = True
     _no_split_modules = ['InternLM2DecoderLayer']
     _skip_keys_device_placement = 'past_key_values'
+    _supports_flash_attn_2 = True
 
     def _init_weights(self, module):
         std = self.config.initializer_range
 
         self.padding_idx = config.pad_token_id
         self.vocab_size = config.vocab_size
         self.config = config
+        if not has_flash_attn:
+            self.config.attn_implementation = 'eager'
+            print('Warning: Flash attention is not available, using eager attention instead.')
 
         self.tok_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
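Because the constructor patches `attn_implementation` itself, the same checkpoint loads with or without flash-attn installed. A hedged loading sketch (the checkpoint id is illustrative):

```python
import torch
from transformers import AutoModel, AutoTokenizer

path = 'OpenGVLab/InternVL-Chat-V1-5'  # illustrative checkpoint id
# If flash_attn failed to import above, __init__ silently switches the
# config to eager attention, so no extra flag is needed here.
model = AutoModel.from_pretrained(path, torch_dtype=torch.bfloat16,
                                  trust_remote_code=True).eval()
tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
```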
 
             output = (logits,) + outputs[1:]
             return (loss,) + output if loss is not None else output
 
+        device = input_ids.device if input_ids is not None else inputs_embeds.device
+        output = CausalLMOutputWithPast(
             loss=loss,
             logits=logits,
             past_key_values=outputs.past_key_values,
             hidden_states=outputs.hidden_states,
             attentions=outputs.attentions,
         )
+        output['logits'] = output['logits'].to(device)
+        return output
 
     def prepare_inputs_for_generation(
             self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
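The new `.to(device)` is a multi-GPU fix: with `device_map='auto'`, the `lm_head` that produces the logits may sit on a different card than the embedding layer that received `input_ids`, and callers index the returned logits with tensors living on the inputs' device. A one-line sketch of the invariant being restored:

```python
import torch

def align_logits(logits: torch.Tensor, input_ids: torch.Tensor) -> torch.Tensor:
    # Under model sharding, logits may come back on cuda:1 while input_ids
    # stayed on cuda:0; copying them over keeps downstream indexing single-device.
    return logits.to(input_ids.device)
```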
modeling_internvl_chat.py CHANGED
@@ -1,13 +1,13 @@
 # --------------------------------------------------------
 # InternVL
-# Copyright (c) 2023 OpenGVLab
 # Licensed under The MIT License [see LICENSE for details]
 # --------------------------------------------------------
 import warnings
 from typing import Any, List, Optional, Tuple, Union
 
 import torch.utils.checkpoint
-from peft import LoraConfig, get_peft_model
 from torch import nn
 from torch.nn import CrossEntropyLoss
 from transformers import (AutoModel, GenerationConfig, LlamaForCausalLM,
@@ -17,54 +17,30 @@ from transformers.modeling_utils import PreTrainedModel
 from transformers.utils import ModelOutput, logging
 
 from .configuration_internvl_chat import InternVLChatConfig
 from .modeling_intern_vit import InternVisionModel
 from .modeling_internlm2 import InternLM2ForCausalLM
 
 logger = logging.get_logger(__name__)
 
 
-def window_partition(x, window_size):
-    """
-    Args:
-        x: (B, C, H, W)
-        window_size (int): window size, assuming square window
-
-    Returns:
-        windows: (num_windows*B, C, window_size, window_size)
-    """
-    B, C, H, W = x.shape
-    assert H % window_size == 0 and W % window_size == 0, 'H and W must be divisible by window_size'
-
-    x = x.view(B, C, H // window_size, window_size, W // window_size, window_size)
-    windows = x.permute(0, 2, 4, 1, 3, 5).contiguous().view(-1, C, window_size, window_size)
-    return windows
-
-
-def window_reverse(windows, window_size, H, W):
-    """
-    Args:
-        windows: (num_windows*B, window_size, window_size, C)
-        window_size (int): Window size
-        H (int): Height of image
-        W (int): Width of image
-
-    Returns:
-        x: (B, H * W, C)
-    """
-    B = int(windows.shape[0] / (H * W / window_size / window_size))
-    x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
-    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H * W, -1)
-    return x
 
 
 class InternVLChatModel(PreTrainedModel):
     config_class = InternVLChatConfig
     main_input_name = 'pixel_values'
-    _no_split_modules = ['InternVisionEncoderLayer', 'LlamaDecoderLayer', 'LlamaForCausalLM']
 
     def __init__(self, config: InternVLChatConfig, vision_model=None, language_model=None):
         super().__init__(config)
 
         image_size = config.force_image_size or config.vision_config.image_size
         patch_size = config.vision_config.patch_size
         self.patch_size = patch_size
@@ -72,7 +48,6 @@ class InternVLChatModel(PreTrainedModel):
         self.template = config.template
         self.num_image_token = int((image_size // patch_size) ** 2 * (config.downsample_ratio ** 2))
         self.downsample_ratio = config.downsample_ratio
-        self.image_fold = config.image_fold
         self.ps_version = config.ps_version
 
         logger.info(f'num_image_token: {self.num_image_token}')
@@ -101,44 +76,7 @@ class InternVLChatModel(PreTrainedModel):
             nn.Linear(llm_hidden_size, llm_hidden_size)
         )
 
-        # if config.force_image_size != config.vision_config.image_size:
-        #     self.vision_model.resize_pos_embeddings(
-        #         old_size=config.vision_config.image_size,
-        #         new_size=config.force_image_size,
-        #         patch_size=config.vision_config.patch_size
-        #     )
-
         self.img_context_token_id = None
-        self.neftune_alpha = None
-
-        if config.use_backbone_lora:
-            self.wrap_backbone_lora(r=config.use_backbone_lora, lora_alpha=2 * config.use_backbone_lora)
-
-        if config.use_llm_lora:
-            self.wrap_llm_lora(r=config.use_llm_lora, lora_alpha=2 * config.use_llm_lora)
-
-    def wrap_backbone_lora(self, r=128, lora_alpha=256, lora_dropout=0.05):
-        lora_config = LoraConfig(
-            r=r,
-            target_modules=['attn.qkv', 'attn.proj', 'mlp.fc1', 'mlp.fc2'],
-            lora_alpha=lora_alpha,
-            lora_dropout=lora_dropout,
-        )
-        self.vision_model = get_peft_model(self.vision_model, lora_config)
-        self.vision_model.print_trainable_parameters()
-
-    def wrap_llm_lora(self, r=128, lora_alpha=256, lora_dropout=0.05):
-        lora_config = LoraConfig(
-            r=r,
-            target_modules=['self_attn.q_proj', 'self_attn.k_proj', 'self_attn.v_proj', 'self_attn.o_proj',
-                            'mlp.gate_proj', 'mlp.down_proj', 'mlp.up_proj'],
-            lora_alpha=lora_alpha,
-            lora_dropout=lora_dropout,
-            task_type='CAUSAL_LM'
-        )
-        self.language_model = get_peft_model(self.language_model, lora_config)
-        self.language_model.enable_input_require_grads()
-        self.language_model.print_trainable_parameters()
 
     def forward(
         self,
@@ -235,17 +173,7 @@ class InternVLChatModel(PreTrainedModel):
         x = x.permute(0, 2, 1, 3).contiguous()
         return x
 
-    def noised_embed(self, vit_embeds, noise_alpha=5):
-        dims = torch.tensor(vit_embeds.size(1) * vit_embeds.size(2))
-        mag_norm = noise_alpha / torch.sqrt(dims)
-        noise = torch.zeros_like(vit_embeds).uniform_(-mag_norm, mag_norm)
-        return vit_embeds + noise
-
     def extract_feature(self, pixel_values):
-        if self.image_fold:
-            image_size = pixel_values.size(-1)  # B, C, H, W
-            pixel_values = window_partition(pixel_values, window_size=image_size // self.image_fold)  # 4B, C, H/2, W/2
-
         if self.select_layer == -1:
             vit_embeds = self.vision_model(
                 pixel_values=pixel_values,
@@ -258,50 +186,94 @@ class InternVLChatModel(PreTrainedModel):
                 return_dict=True).hidden_states[self.select_layer]
         vit_embeds = vit_embeds[:, 1:, :]
 
-        if self.training and self.neftune_alpha is not None:
-            vit_embeds = self.noised_embed(vit_embeds, self.neftune_alpha)
-
-        if self.image_fold:
-            vit_embeds = window_reverse(vit_embeds, window_size=image_size // (self.image_fold * self.patch_size),
-                                        H=image_size // self.patch_size, W=image_size // self.patch_size)
-
-        # if torch.distributed.get_rank() == 0:
-        #     print("before pixel shuffle:", vit_embeds.shape)
         h = w = int(vit_embeds.shape[1] ** 0.5)
         vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], h, w, -1)
         vit_embeds = self.pixel_shuffle(vit_embeds, scale_factor=self.downsample_ratio)
         vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], -1, vit_embeds.shape[-1])
-        # if torch.distributed.get_rank() == 0:
-        #     print("after pixel shuffle:", vit_embeds.shape)
         vit_embeds = self.mlp1(vit_embeds)
         return vit_embeds
 
-    def chat(self, tokenizer, pixel_values, question, generation_config, history=None, return_history=False,
-             IMG_START_TOKEN='<img>', IMG_END_TOKEN='</img>', IMG_CONTEXT_TOKEN='<IMG_CONTEXT>'):
 
         img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
         self.img_context_token_id = img_context_token_id
-        if tokenizer.convert_tokens_to_ids('<|im_end|>') != 0:
-            eos_token_id = tokenizer.convert_tokens_to_ids('<|im_end|>')  # 92542, InternLM2
-        else:
-            eos_token_id = tokenizer.eos_token_id
 
-        from .conversation import get_conv_template
 
         template = get_conv_template(self.template)
-        image_bs = pixel_values.shape[0]
-        print(f'dynamic ViT batch size: {image_bs}')
-        if history is None:
-            history = []
-            image_tokens = IMG_START_TOKEN + IMG_CONTEXT_TOKEN * self.num_image_token * image_bs + IMG_END_TOKEN
-            question = image_tokens + '\n' + question
-        else:
-            for (old_question, old_answer) in history:
-                template.append_message(template.roles[0], old_question)
-                template.append_message(template.roles[1], old_answer)
         template.append_message(template.roles[0], question)
         template.append_message(template.roles[1], None)
         query = template.get_prompt()
 
         model_inputs = tokenizer(query, return_tensors='pt')
         input_ids = model_inputs['input_ids'].cuda()
         attention_mask = model_inputs['attention_mask'].cuda()
@@ -313,15 +285,16 @@ class InternVLChatModel(PreTrainedModel):
             **generation_config
         )
         response = tokenizer.batch_decode(generation_output, skip_special_tokens=True)[0]
-        response = response.split('<|im_end|>')[0].strip()  # for InternLM2
         history.append((question, response))
         if return_history:
             return response, history
         else:
-            query_to_print = query.replace(image_tokens, '<image>')
-            print(query_to_print, response)
             return response
-        return response
 
     @torch.no_grad()
     def generate(
@@ -342,7 +315,6 @@ class InternVLChatModel(PreTrainedModel):
                 vit_embeds = visual_features
             else:
                 vit_embeds = self.extract_feature(pixel_values)
-
             input_embeds = self.language_model.get_input_embeddings()(input_ids)
             B, N, C = input_embeds.shape
             input_embeds = input_embeds.reshape(B * N, C)
@@ -350,7 +322,7 @@ class InternVLChatModel(PreTrainedModel):
             input_ids = input_ids.reshape(B * N)
             selected = (input_ids == self.img_context_token_id)
             assert selected.sum() != 0
-            input_embeds[selected] = vit_embeds.reshape(-1, C)
 
             input_embeds = input_embeds.reshape(B, N, C)
         else:
 
 # --------------------------------------------------------
 # InternVL
+# Copyright (c) 2024 OpenGVLab
 # Licensed under The MIT License [see LICENSE for details]
 # --------------------------------------------------------
 import warnings
 from typing import Any, List, Optional, Tuple, Union
 
 import torch.utils.checkpoint
+import transformers
 from torch import nn
 from torch.nn import CrossEntropyLoss
 from transformers import (AutoModel, GenerationConfig, LlamaForCausalLM,
 
 from transformers.utils import ModelOutput, logging
 
 from .configuration_internvl_chat import InternVLChatConfig
+from .conversation import get_conv_template
 from .modeling_intern_vit import InternVisionModel
 from .modeling_internlm2 import InternLM2ForCausalLM
 
 logger = logging.get_logger(__name__)
 
 
+def version_cmp(v1, v2, op='eq'):
+    import operator
+
+    from packaging import version
+    op_func = getattr(operator, op)
+    return op_func(version.parse(v1), version.parse(v2))
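`version_cmp` resolves the operator name through the `operator` module and compares `packaging` version objects, which order release numbers correctly where plain string comparison does not:

```python
import operator

from packaging import version

# Equivalent to version_cmp('4.40.1', '4.36.2', 'ge') -> True
print(operator.ge(version.parse('4.40.1'), version.parse('4.36.2')))
# True: 9 < 40 numerically, even though '4.9.0' > '4.40.1' as strings
print(version.parse('4.9.0') < version.parse('4.40.1'))
```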
 
 
 class InternVLChatModel(PreTrainedModel):
     config_class = InternVLChatConfig
     main_input_name = 'pixel_values'
+    _no_split_modules = ['InternVisionModel', 'LlamaDecoderLayer', 'InternLM2DecoderLayer']
 
     def __init__(self, config: InternVLChatConfig, vision_model=None, language_model=None):
         super().__init__(config)
 
+        assert version_cmp(transformers.__version__, '4.36.2', 'ge')
         image_size = config.force_image_size or config.vision_config.image_size
         patch_size = config.vision_config.patch_size
         self.patch_size = patch_size
 
         self.template = config.template
         self.num_image_token = int((image_size // patch_size) ** 2 * (config.downsample_ratio ** 2))
         self.downsample_ratio = config.downsample_ratio
         self.ps_version = config.ps_version
 
         logger.info(f'num_image_token: {self.num_image_token}')
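With the values this commit ships in config.json (448-pixel tiles, `downsample_ratio` 0.5) and a 14-pixel ViT patch size (assumed here from InternViT-6B; the vision config excerpt above does not show it), the formula works out to 256 language-model tokens per tile:

```python
image_size, patch_size, downsample_ratio = 448, 14, 0.5  # patch size assumed
num_image_token = int((image_size // patch_size) ** 2 * downsample_ratio ** 2)
print(num_image_token)  # (448 // 14)**2 * 0.25 = 1024 // 4 = 256
```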
 
             nn.Linear(llm_hidden_size, llm_hidden_size)
         )
 
         self.img_context_token_id = None
 
     def forward(
             self,
 
         x = x.permute(0, 2, 1, 3).contiguous()
         return x
 
     def extract_feature(self, pixel_values):
         if self.select_layer == -1:
             vit_embeds = self.vision_model(
                 pixel_values=pixel_values,

                 return_dict=True).hidden_states[self.select_layer]
         vit_embeds = vit_embeds[:, 1:, :]
 
         h = w = int(vit_embeds.shape[1] ** 0.5)
         vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], h, w, -1)
         vit_embeds = self.pixel_shuffle(vit_embeds, scale_factor=self.downsample_ratio)
         vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], -1, vit_embeds.shape[-1])
         vit_embeds = self.mlp1(vit_embeds)
         return vit_embeds
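The pixel shuffle is where `downsample_ratio` takes effect: each tile's 32x32 grid of ViT embeddings is folded to 16x16 while the channel width quadruples, and `mlp1` then projects the wider channels to the LLM hidden size. A standalone walk-through of the shape bookkeeping, under the same 448/14 assumptions as above:

```python
import torch

B, h, w, C = 2, 32, 32, 3200  # per-tile ViT grid; C is the InternViT-6B width
scale = 0.5

x = torch.randn(B, h, w, C)
# (B, 32, 32, C) -> (B, 32, 16, 2C): merge pairs of columns into channels
x = x.view(B, h, int(w * scale), int(C / scale))
x = x.permute(0, 2, 1, 3).contiguous()
# (B, 16, 32, 2C) -> (B, 16, 16, 4C): merge pairs of rows as well
x = x.view(B, int(w * scale), int(h * scale), int(C / scale ** 2))
print(x.flatten(1, 2).shape)  # torch.Size([2, 256, 12800])
```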
+    def batch_chat(self, tokenizer, pixel_values, questions, generation_config, num_patches_list=None,
+                   history=None, return_history=False, IMG_START_TOKEN='<img>', IMG_END_TOKEN='</img>',
+                   IMG_CONTEXT_TOKEN='<IMG_CONTEXT>', verbose=False, image_counts=None):
+        if history is not None or return_history:
+            print('Multi-turn chat is not supported in batch_chat yet.')
+            raise NotImplementedError
+
+        if image_counts is not None:
+            num_patches_list = image_counts
+            print('Warning: `image_counts` is deprecated. Please use `num_patches_list` instead.')
 
         img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
         self.img_context_token_id = img_context_token_id
 
+        if verbose and pixel_values is not None:
+            image_bs = pixel_values.shape[0]
+            print(f'dynamic ViT batch size: {image_bs}')
+
+        queries = []
+        for idx, num_patches in enumerate(num_patches_list):
+            question = questions[idx]
+            if pixel_values is not None and '<image>' not in question:
+                question = '<image>\n' + question
+            template = get_conv_template(self.template)
+            template.append_message(template.roles[0], question)
+            template.append_message(template.roles[1], None)
+            query = template.get_prompt()
+
+            image_tokens = IMG_START_TOKEN + IMG_CONTEXT_TOKEN * self.num_image_token * num_patches + IMG_END_TOKEN
+            query = query.replace('<image>', image_tokens, 1)
+            queries.append(query)
+
+        tokenizer.padding_side = 'left'
+        model_inputs = tokenizer(queries, return_tensors='pt', padding=True)
+        input_ids = model_inputs['input_ids'].cuda()
+        attention_mask = model_inputs['attention_mask'].cuda()
+        eos_token_id = tokenizer.convert_tokens_to_ids(template.sep)
+        generation_config['eos_token_id'] = eos_token_id
+        generation_output = self.generate(
+            pixel_values=pixel_values,
+            input_ids=input_ids,
+            attention_mask=attention_mask,
+            **generation_config
+        )
+        responses = tokenizer.batch_decode(generation_output, skip_special_tokens=True)
+        responses = [response.split(template.sep)[0].strip() for response in responses]
+        return responses
+
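A sketch of how the new `batch_chat` is meant to be driven: one entry in `num_patches_list` per image, with `pixel_values` holding all images' tiles concatenated along dim 0. Note that it pads on the left so every prompt ends at the generation position. `model`, `tokenizer`, and a `load_image` tiling helper are assumed, as in the usual InternVL quick-start:

```python
import torch

# Tiles for two images, concatenated; num_patches_list records how many
# 448x448 tiles each image contributed.
pixel_values1 = load_image('./image1.jpg', max_num=6).to(torch.bfloat16).cuda()  # assumed helper
pixel_values2 = load_image('./image2.jpg', max_num=6).to(torch.bfloat16).cuda()
pixel_values = torch.cat((pixel_values1, pixel_values2), dim=0)
num_patches_list = [pixel_values1.size(0), pixel_values2.size(0)]

questions = ['<image>\nDescribe the image in detail.'] * len(num_patches_list)
responses = model.batch_chat(tokenizer, pixel_values,
                             questions=questions,
                             num_patches_list=num_patches_list,
                             generation_config=dict(max_new_tokens=512, do_sample=False))
```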
+    def chat(self, tokenizer, pixel_values, question, generation_config, history=None, return_history=False,
+             num_patches_list=None, IMG_START_TOKEN='<img>', IMG_END_TOKEN='</img>', IMG_CONTEXT_TOKEN='<IMG_CONTEXT>',
+             verbose=False):
+
+        if history is None and pixel_values is not None and '<image>' not in question:
+            question = '<image>\n' + question
+
+        if num_patches_list is None:
+            num_patches_list = [pixel_values.shape[0]] if pixel_values is not None else []
+        assert pixel_values is None or len(pixel_values) == sum(num_patches_list)
+
+        img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
+        self.img_context_token_id = img_context_token_id
 
         template = get_conv_template(self.template)
+        eos_token_id = tokenizer.convert_tokens_to_ids(template.sep)
+
+        history = [] if history is None else history
+        for (old_question, old_answer) in history:
+            template.append_message(template.roles[0], old_question)
+            template.append_message(template.roles[1], old_answer)
         template.append_message(template.roles[0], question)
         template.append_message(template.roles[1], None)
         query = template.get_prompt()
+
+        if verbose and pixel_values is not None:
+            image_bs = pixel_values.shape[0]
+            print(f'dynamic ViT batch size: {image_bs}')
+
+        for num_patches in num_patches_list:
+            image_tokens = IMG_START_TOKEN + IMG_CONTEXT_TOKEN * self.num_image_token * num_patches + IMG_END_TOKEN
+            query = query.replace('<image>', image_tokens, 1)
+
         model_inputs = tokenizer(query, return_tensors='pt')
         input_ids = model_inputs['input_ids'].cuda()
         attention_mask = model_inputs['attention_mask'].cuda()

             **generation_config
         )
         response = tokenizer.batch_decode(generation_output, skip_special_tokens=True)[0]
+        response = response.split(template.sep)[0].strip()
         history.append((question, response))
         if return_history:
             return response, history
         else:
+            query_to_print = query.replace(IMG_CONTEXT_TOKEN, '')
+            query_to_print = query_to_print.replace(f'{IMG_START_TOKEN}{IMG_END_TOKEN}', '<image>')
+            if verbose:
+                print(query_to_print, response)
             return response
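The revised `chat` keeps single-image, multi-turn use simple: pass the returned history back in on the next call. A usage sketch under the same assumptions as the `batch_chat` example above:

```python
generation_config = dict(max_new_tokens=512, do_sample=False)

# First turn: the '<image>' placeholder is inserted automatically when absent.
response, history = model.chat(tokenizer, pixel_values, 'Describe the image in detail.',
                               generation_config, history=None, return_history=True)

# Second turn: the returned history carries the conversation forward.
response, history = model.chat(tokenizer, pixel_values, 'What is unusual about it?',
                               generation_config, history=history, return_history=True)
```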
 
 
     @torch.no_grad()
     def generate(

                 vit_embeds = visual_features
             else:
                 vit_embeds = self.extract_feature(pixel_values)
             input_embeds = self.language_model.get_input_embeddings()(input_ids)
             B, N, C = input_embeds.shape
             input_embeds = input_embeds.reshape(B * N, C)

             input_ids = input_ids.reshape(B * N)
             selected = (input_ids == self.img_context_token_id)
             assert selected.sum() != 0
+            input_embeds[selected] = vit_embeds.reshape(-1, C).to(input_embeds.device)
 
             input_embeds = input_embeds.reshape(B, N, C)
         else:
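The `.to(input_embeds.device)` added here is the counterpart of the logits fix in modeling_internlm2.py: under sharding, the ViT output can live on a different card than the text embeddings it is spliced into. The splice itself is plain boolean-mask assignment, one visual embedding per `<IMG_CONTEXT>` placeholder; a toy sketch:

```python
import torch

C = 8
input_embeds = torch.zeros(6, C)              # flattened (B*N, C) text embeddings
input_ids = torch.tensor([1, 9, 9, 9, 2, 3])  # 9 plays the role of img_context_token_id
vit_embeds = torch.randn(3, C)                # one visual embedding per placeholder

selected = (input_ids == 9)
assert selected.sum() == vit_embeds.shape[0]  # placeholder count must match exactly
input_embeds[selected] = vit_embeds           # rows are filled in placeholder order
```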