fix compatibility issue for transformers 4.46+
Files changed:
- configuration_internvl_chat.py +3 -3
- conversation.py +1 -1
configuration_internvl_chat.py
CHANGED
@@ -46,12 +46,12 @@ class InternVLChatConfig(PretrainedConfig):
             logger.info('llm_config is None. Initializing the LlamaConfig config with default values (`LlamaConfig`).')

         self.vision_config = InternVisionConfig(**vision_config)
-        if llm_config['architectures'][0] == 'LlamaForCausalLM':
+        if llm_config.get('architectures')[0] == 'LlamaForCausalLM':
             self.llm_config = LlamaConfig(**llm_config)
-        elif llm_config['architectures'][0] == 'Qwen2ForCausalLM':
+        elif llm_config.get('architectures')[0] == 'Qwen2ForCausalLM':
             self.llm_config = Qwen2Config(**llm_config)
         else:
-            raise ValueError('Unsupported architecture: {}'.format(llm_config['architectures'][0]))
+            raise ValueError('Unsupported architecture: {}'.format(llm_config.get('architectures')[0]))
         self.use_backbone_lora = use_backbone_lora
         self.use_llm_lora = use_llm_lora
         self.select_layer = select_layer
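The three changed lines switch the architectures lookup from plain indexing (llm_config['architectures'][0]) to dict.get(); the behavioural difference is that a missing 'architectures' key then yields None instead of an immediate KeyError. Below is a minimal, runnable sketch of the dispatch these lines implement; pick_llm_config_class is a hypothetical helper name, and plain strings stand in for LlamaConfig/Qwen2Config so the snippet runs without transformers installed:

# Sketch only: mirrors the dispatch in the patched hunk above, with strings in
# place of the real transformers config classes (LlamaConfig / Qwen2Config).
def pick_llm_config_class(llm_config: dict) -> str:
    # .get() mirrors the patched lines; like the real code, this still assumes
    # the 'architectures' list is present and non-empty.
    arch = llm_config.get('architectures')[0]
    if arch == 'LlamaForCausalLM':
        return 'LlamaConfig'
    elif arch == 'Qwen2ForCausalLM':
        return 'Qwen2Config'
    else:
        raise ValueError('Unsupported architecture: {}'.format(arch))


print(pick_llm_config_class({'architectures': ['Qwen2ForCausalLM']}))  # Qwen2Config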
conversation.py
CHANGED
@@ -9,7 +9,7 @@ Modified from https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation

 import dataclasses
 from enum import IntEnum, auto
-from typing import
+from typing import Dict, List, Tuple, Union


 class SeparatorStyle(IntEnum):
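The conversation.py change touches only the typing import line. In Python, dataclass field annotations and module-level variable annotations are evaluated when the class body or module executes, so a name like Dict or Tuple that appears in an annotation but is not imported fails at import time with a NameError. A hypothetical minimal example of that pattern (the class and field names below are illustrative, not the file's actual contents):

# Hypothetical illustration (not InternVL's actual Conversation class): the
# annotations below are evaluated when the class body runs, so Dict, List
# and Tuple must already be imported or the module fails with NameError.
import dataclasses
from typing import Dict, List, Tuple


@dataclasses.dataclass
class MiniConversation:
    name: str
    roles: Tuple[str, str] = ('USER', 'ASSISTANT')
    messages: List[List[str]] = dataclasses.field(default_factory=list)


templates: Dict[str, MiniConversation] = {}
templates['demo'] = MiniConversation(name='demo')
print(templates['demo'].roles)  # ('USER', 'ASSISTANT')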