czczup commited on
Commit
d9a286a
β€’
1 Parent(s): ee0dcac

fix compatibility issue for transformers 4.46+

Browse files
Files changed (3) hide show
  1. README.md +9 -2
  2. config.json +1 -1
  3. configuration_internvl_chat.py +2 -2
README.md CHANGED
@@ -13,11 +13,12 @@ tags:
13
  - custom_code
14
  base_model: OpenGVLab/InternVL2-Llama3-76B
15
  base_model_relation: quantized
 
16
  ---
17
 
18
  # InternVL2-Llama3-76B-AWQ
19
 
20
- [\[πŸ“‚ GitHub\]](https://github.com/OpenGVLab/InternVL) [\[πŸ†• Blog\]](https://internvl.github.io/blog/) [\[πŸ“œ InternVL 1.0 Paper\]](https://arxiv.org/abs/2312.14238) [\[πŸ“œ InternVL 1.5 Report\]](https://arxiv.org/abs/2404.16821)
21
 
22
  [\[πŸ—¨οΈ Chat Demo\]](https://internvl.opengvlab.com/) [\[πŸ€— HF Demo\]](https://huggingface.co/spaces/OpenGVLab/InternVL) [\[πŸš€ Quick Start\]](#quick-start) [\[πŸ“– 中文解读\]](https://zhuanlan.zhihu.com/p/706547971) [\[πŸ“– Documents\]](https://internvl.readthedocs.io/en/latest/)
23
 
@@ -42,7 +43,7 @@ LMDeploy supports the following NVIDIA GPU for W4A16 inference:
42
  Before proceeding with the quantization and inference, please ensure that lmdeploy is installed.
43
 
44
  ```shell
45
- pip install lmdeploy==0.5.3
46
  ```
47
 
48
  This article comprises the following sections:
@@ -123,6 +124,12 @@ This project is released under the MIT license, while Llama3 is licensed under t
123
  If you find this project useful in your research, please consider citing:
124
 
125
  ```BibTeX
 
 
 
 
 
 
126
  @article{chen2023internvl,
127
  title={InternVL: Scaling up Vision Foundation Models and Aligning for Generic Visual-Linguistic Tasks},
128
  author={Chen, Zhe and Wu, Jiannan and Wang, Wenhai and Su, Weijie and Chen, Guo and Xing, Sen and Zhong, Muyan and Zhang, Qinglong and Zhu, Xizhou and Lu, Lewei and Li, Bin and Luo, Ping and Lu, Tong and Qiao, Yu and Dai, Jifeng},
 
13
  - custom_code
14
  base_model: OpenGVLab/InternVL2-Llama3-76B
15
  base_model_relation: quantized
16
+ new_version: OpenGVLab/InternVL2_5-78B-AWQ
17
  ---
18
 
19
  # InternVL2-Llama3-76B-AWQ
20
 
21
+ [\[πŸ“‚ GitHub\]](https://github.com/OpenGVLab/InternVL) [\[πŸ†• Blog\]](https://internvl.github.io/blog/) [\[πŸ“œ InternVL 1.0\]](https://arxiv.org/abs/2312.14238) [\[πŸ“œ InternVL 1.5\]](https://arxiv.org/abs/2404.16821) [\[πŸ“œ Mini-InternVL\]](https://arxiv.org/abs/2410.16261)
22
 
23
  [\[πŸ—¨οΈ Chat Demo\]](https://internvl.opengvlab.com/) [\[πŸ€— HF Demo\]](https://huggingface.co/spaces/OpenGVLab/InternVL) [\[πŸš€ Quick Start\]](#quick-start) [\[πŸ“– 中文解读\]](https://zhuanlan.zhihu.com/p/706547971) [\[πŸ“– Documents\]](https://internvl.readthedocs.io/en/latest/)
24
 
 
43
  Before proceeding with the quantization and inference, please ensure that lmdeploy is installed.
44
 
45
  ```shell
46
+ pip install "lmdeploy>=0.5.3"
47
  ```
48
 
49
  This article comprises the following sections:
 
124
  If you find this project useful in your research, please consider citing:
125
 
126
  ```BibTeX
127
+ @article{gao2024mini,
128
+ title={Mini-internvl: A flexible-transfer pocket multimodal model with 5\% parameters and 90\% performance},
129
+ author={Gao, Zhangwei and Chen, Zhe and Cui, Erfei and Ren, Yiming and Wang, Weiyun and Zhu, Jinguo and Tian, Hao and Ye, Shenglong and He, Junjun and Zhu, Xizhou and others},
130
+ journal={arXiv preprint arXiv:2410.16261},
131
+ year={2024}
132
+ }
133
  @article{chen2023internvl,
134
  title={InternVL: Scaling up Vision Foundation Models and Aligning for Generic Visual-Linguistic Tasks},
135
  author={Chen, Zhe and Wu, Jiannan and Wang, Wenhai and Su, Weijie and Chen, Guo and Xing, Sen and Zhong, Muyan and Zhang, Qinglong and Zhu, Xizhou and Lu, Lewei and Li, Bin and Luo, Ping and Lu, Tong and Qiao, Yu and Dai, Jifeng},
config.json CHANGED
@@ -17,9 +17,9 @@
17
  "architectures": [
18
  "LlamaForCausalLM"
19
  ],
 
20
  "attention_bias": false,
21
  "attention_dropout": 0.0,
22
- "attn_implementation": "eager",
23
  "bad_words_ids": null,
24
  "begin_suppress_tokens": null,
25
  "bos_token_id": 128000,
 
17
  "architectures": [
18
  "LlamaForCausalLM"
19
  ],
20
+ "attn_implementation": "eager",
21
  "attention_bias": false,
22
  "attention_dropout": 0.0,
 
23
  "bad_words_ids": null,
24
  "begin_suppress_tokens": null,
25
  "bos_token_id": 128000,
configuration_internvl_chat.py CHANGED
@@ -38,11 +38,11 @@ class InternVLChatConfig(PretrainedConfig):
38
  super().__init__(**kwargs)
39
 
40
  if vision_config is None:
41
- vision_config = {}
42
  logger.info('vision_config is None. Initializing the InternVisionConfig with default values.')
43
 
44
  if llm_config is None:
45
- llm_config = {}
46
  logger.info('llm_config is None. Initializing the LlamaConfig config with default values (`LlamaConfig`).')
47
 
48
  self.vision_config = InternVisionConfig(**vision_config)
 
38
  super().__init__(**kwargs)
39
 
40
  if vision_config is None:
41
+ vision_config = {'architectures': ['InternVisionModel']}
42
  logger.info('vision_config is None. Initializing the InternVisionConfig with default values.')
43
 
44
  if llm_config is None:
45
+ llm_config = {'architectures': ['LlamaForCausalLM']}
46
  logger.info('llm_config is None. Initializing the LlamaConfig config with default values (`LlamaConfig`).')
47
 
48
  self.vision_config = InternVisionConfig(**vision_config)