czczup committed
Commit
9365737
1 Parent(s): 000f0c7

Upload folder using huggingface_hub

Files changed (2):
  1. README.md +1 -1
  2. modeling_internvl_chat.py +3 -1
README.md CHANGED
@@ -2,7 +2,7 @@
  license: mit
  pipeline_tag: image-text-to-text
  library_name: transformers
- base_model:
+ base_model:
  - OpenGVLab/InternViT-300M-448px
  - Qwen/Qwen2-0.5B-Instruct
  base_model_relation: merge
modeling_internvl_chat.py CHANGED
@@ -37,7 +37,7 @@ class InternVLChatModel(PreTrainedModel):
     _supports_flash_attn_2 = True
     _no_split_modules = ['InternVisionModel', 'LlamaDecoderLayer', 'Qwen2DecoderLayer']
 
-    def __init__(self, config: InternVLChatConfig, vision_model=None, language_model=None):
+    def __init__(self, config: InternVLChatConfig, vision_model=None, language_model=None, use_flash_attn=True):
         super().__init__(config)
 
         assert version_cmp(transformers.__version__, '4.37.0', 'ge')
@@ -49,6 +49,8 @@ class InternVLChatModel(PreTrainedModel):
         self.num_image_token = int((image_size // patch_size) ** 2 * (config.downsample_ratio ** 2))
         self.downsample_ratio = config.downsample_ratio
         self.ps_version = config.ps_version
+        config.vision_config.use_flash_attn = True if use_flash_attn else False
+        config.llm_config._attn_implementation = 'flash_attention_2' if use_flash_attn else 'eager'
 
         logger.info(f'num_image_token: {self.num_image_token}')
         logger.info(f'ps_version: {self.ps_version}')
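
With this change, flash attention becomes a load-time choice rather than a fixed property of the checkpoint config: the new use_flash_attn keyword on InternVLChatModel.__init__ toggles config.vision_config.use_flash_attn and switches the language model between 'flash_attention_2' and 'eager'. A minimal usage sketch, assuming the standard trust_remote_code loading path for InternVL checkpoints, under which extra keywords to from_pretrained are forwarded to the model's __init__ (the checkpoint id here is illustrative):

import torch
from transformers import AutoModel

# Illustrative checkpoint id; any InternVL checkpoint shipping this
# modeling file should accept the same keyword.
path = 'OpenGVLab/InternVL2-1B'

# use_flash_attn=False falls back to eager attention, e.g. on hardware
# where flash-attn is not installed; use_flash_attn=True (the default)
# enables flash_attention_2 in both the vision tower and the LLM.
model = AutoModel.from_pretrained(
    path,
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=True,
    use_flash_attn=False,
    trust_remote_code=True,
).eval()

Before this commit the attention implementation was taken solely from the saved config, so disabling flash attention required editing the checkpoint's config files by hand.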