czczup commited on
Commit
0284e5f
1 Parent(s): 18c8446

Fix compatibility issues for transformers 4.46+

Browse files
configuration_intern_vit.py CHANGED
@@ -3,6 +3,7 @@
3
  # Copyright (c) 2024 OpenGVLab
4
  # Licensed under The MIT License [see LICENSE for details]
5
  # --------------------------------------------------------
 
6
  import os
7
  from typing import Union
8
 
 
3
  # Copyright (c) 2024 OpenGVLab
4
  # Licensed under The MIT License [see LICENSE for details]
5
  # --------------------------------------------------------
6
+
7
  import os
8
  from typing import Union
9
 
conversation.py CHANGED
@@ -3,6 +3,8 @@ Conversation prompt templates.
3
 
4
  We kindly request that you import fastchat instead of copying this file if you wish to use it.
5
  If you have changes in mind, please contribute back so the community can benefit collectively and continue to maintain these valuable templates.
 
 
6
  """
7
 
8
  import dataclasses
@@ -344,12 +346,6 @@ register_conv_template(
344
  roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
345
  sep_style=SeparatorStyle.MPT,
346
  sep='<|im_end|>',
347
- stop_token_ids=[
348
- 2,
349
- 6,
350
- 7,
351
- 8,
352
- ],
353
  stop_str='<|endoftext|>',
354
  )
355
  )
@@ -365,11 +361,6 @@ register_conv_template(
365
  roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
366
  sep_style=SeparatorStyle.MPT,
367
  sep='<|im_end|>',
368
- stop_token_ids=[
369
- 2,
370
- 92543,
371
- 92542
372
- ]
373
  )
374
  )
375
 
@@ -384,10 +375,17 @@ register_conv_template(
384
  roles=('<|user|>\n', '<|assistant|>\n'),
385
  sep_style=SeparatorStyle.MPT,
386
  sep='<|end|>',
387
- stop_token_ids=[
388
- 2,
389
- 32000,
390
- 32007
391
- ]
 
 
 
 
 
 
 
392
  )
393
  )
 
3
 
4
  We kindly request that you import fastchat instead of copying this file if you wish to use it.
5
  If you have changes in mind, please contribute back so the community can benefit collectively and continue to maintain these valuable templates.
6
+
7
+ Modified from https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py
8
  """
9
 
10
  import dataclasses
 
346
  roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
347
  sep_style=SeparatorStyle.MPT,
348
  sep='<|im_end|>',
 
 
 
 
 
 
349
  stop_str='<|endoftext|>',
350
  )
351
  )
 
361
  roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
362
  sep_style=SeparatorStyle.MPT,
363
  sep='<|im_end|>',
 
 
 
 
 
364
  )
365
  )
366
 
 
375
  roles=('<|user|>\n', '<|assistant|>\n'),
376
  sep_style=SeparatorStyle.MPT,
377
  sep='<|end|>',
378
+ )
379
+ )
380
+
381
+
382
+ register_conv_template(
383
+ Conversation(
384
+ name='internvl2_5',
385
+ system_template='<|im_start|>system\n{system_message}',
386
+ system_message='你是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。',
387
+ roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
388
+ sep_style=SeparatorStyle.MPT,
389
+ sep='<|im_end|>\n',
390
  )
391
  )
modeling_intern_vit.py CHANGED
@@ -3,6 +3,7 @@
3
  # Copyright (c) 2024 OpenGVLab
4
  # Licensed under The MIT License [see LICENSE for details]
5
  # --------------------------------------------------------
 
6
  from typing import Optional, Tuple, Union
7
 
8
  import torch
 
3
  # Copyright (c) 2024 OpenGVLab
4
  # Licensed under The MIT License [see LICENSE for details]
5
  # --------------------------------------------------------
6
+
7
  from typing import Optional, Tuple, Union
8
 
9
  import torch
modeling_internvl_chat.py CHANGED
@@ -3,8 +3,9 @@
3
  # Copyright (c) 2024 OpenGVLab
4
  # Licensed under The MIT License [see LICENSE for details]
5
  # --------------------------------------------------------
 
6
  import warnings
7
- from typing import Any, List, Optional, Tuple, Union
8
 
9
  import torch.utils.checkpoint
10
  import transformers
@@ -236,7 +237,7 @@ class InternVLChatModel(PreTrainedModel):
236
  model_inputs = tokenizer(queries, return_tensors='pt', padding=True)
237
  input_ids = model_inputs['input_ids'].to(self.device)
238
  attention_mask = model_inputs['attention_mask'].to(self.device)
239
- eos_token_id = tokenizer.convert_tokens_to_ids(template.sep)
240
  generation_config['eos_token_id'] = eos_token_id
241
  generation_output = self.generate(
242
  pixel_values=pixel_values,
@@ -245,7 +246,7 @@ class InternVLChatModel(PreTrainedModel):
245
  **generation_config
246
  )
247
  responses = tokenizer.batch_decode(generation_output, skip_special_tokens=True)
248
- responses = [response.split(template.sep)[0].strip() for response in responses]
249
  return responses
250
 
251
  def chat(self, tokenizer, pixel_values, question, generation_config, history=None, return_history=False,
@@ -264,7 +265,7 @@ class InternVLChatModel(PreTrainedModel):
264
 
265
  template = get_conv_template(self.template)
266
  template.system_message = self.system_message
267
- eos_token_id = tokenizer.convert_tokens_to_ids(template.sep)
268
 
269
  history = [] if history is None else history
270
  for (old_question, old_answer) in history:
@@ -293,7 +294,7 @@ class InternVLChatModel(PreTrainedModel):
293
  **generation_config
294
  )
295
  response = tokenizer.batch_decode(generation_output, skip_special_tokens=True)[0]
296
- response = response.split(template.sep)[0].strip()
297
  history.append((question, response))
298
  if return_history:
299
  return response, history
@@ -313,7 +314,6 @@ class InternVLChatModel(PreTrainedModel):
313
  visual_features: Optional[torch.FloatTensor] = None,
314
  generation_config: Optional[GenerationConfig] = None,
315
  output_hidden_states: Optional[bool] = None,
316
- return_dict: Optional[bool] = None,
317
  **generate_kwargs,
318
  ) -> torch.LongTensor:
319
 
@@ -341,7 +341,6 @@ class InternVLChatModel(PreTrainedModel):
341
  attention_mask=attention_mask,
342
  generation_config=generation_config,
343
  output_hidden_states=output_hidden_states,
344
- return_dict=return_dict,
345
  use_cache=True,
346
  **generate_kwargs,
347
  )
 
3
  # Copyright (c) 2024 OpenGVLab
4
  # Licensed under The MIT License [see LICENSE for details]
5
  # --------------------------------------------------------
6
+
7
  import warnings
8
+ from typing import List, Optional, Tuple, Union
9
 
10
  import torch.utils.checkpoint
11
  import transformers
 
237
  model_inputs = tokenizer(queries, return_tensors='pt', padding=True)
238
  input_ids = model_inputs['input_ids'].to(self.device)
239
  attention_mask = model_inputs['attention_mask'].to(self.device)
240
+ eos_token_id = tokenizer.convert_tokens_to_ids(template.sep.strip())
241
  generation_config['eos_token_id'] = eos_token_id
242
  generation_output = self.generate(
243
  pixel_values=pixel_values,
 
246
  **generation_config
247
  )
248
  responses = tokenizer.batch_decode(generation_output, skip_special_tokens=True)
249
+ responses = [response.split(template.sep.strip())[0].strip() for response in responses]
250
  return responses
251
 
252
  def chat(self, tokenizer, pixel_values, question, generation_config, history=None, return_history=False,
 
265
 
266
  template = get_conv_template(self.template)
267
  template.system_message = self.system_message
268
+ eos_token_id = tokenizer.convert_tokens_to_ids(template.sep.strip())
269
 
270
  history = [] if history is None else history
271
  for (old_question, old_answer) in history:
 
294
  **generation_config
295
  )
296
  response = tokenizer.batch_decode(generation_output, skip_special_tokens=True)[0]
297
+ response = response.split(template.sep.strip())[0].strip()
298
  history.append((question, response))
299
  if return_history:
300
  return response, history
 
314
  visual_features: Optional[torch.FloatTensor] = None,
315
  generation_config: Optional[GenerationConfig] = None,
316
  output_hidden_states: Optional[bool] = None,
 
317
  **generate_kwargs,
318
  ) -> torch.LongTensor:
319
 
 
341
  attention_mask=attention_mask,
342
  generation_config=generation_config,
343
  output_hidden_states=output_hidden_states,
 
344
  use_cache=True,
345
  **generate_kwargs,
346
  )