Upload folder using huggingface_hub
- README.md +156 -3
- added_tokens.json +24 -0
- config.json +220 -0
- constants.py +12 -0
- conversation.py +592 -0
- generation_config.json +14 -0
- merges.txt +0 -0
- mm_projector_builder.py +165 -0
- mm_utils.py +855 -0
- model.safetensors +3 -0
- modeling_qwen2_flash.py +1545 -0
- modeling_videochat_flash.py +713 -0
- special_tokens_map.json +31 -0
- tokenizer.json +0 -0
- tokenizer_config.json +208 -0
- trainer_state.json +0 -0
- training_args.bin +3 -0
- vision_tower_builder.py +622 -0
- vocab.json +0 -0
README.md
CHANGED
@@ -1,3 +1,156 @@
- ---
-
-
---
language:
- en
library_name: transformers
license: apache-2.0
metrics:
- accuracy
tags:
- multimodal
pipeline_tag: video-text-to-text
model-index:
- name: VideoChat-Flash-Qwen2_5-2B_res448
  results:
  - task:
      type: multimodal
    dataset:
      name: MLVU
      type: mlvu
    metrics:
    - type: accuracy
      value: 65.7
      name: accuracy
      verified: true
  - task:
      type: multimodal
    dataset:
      name: MVBench
      type: mvbench
    metrics:
    - type: accuracy
      value: 70.0
      name: accuracy
      verified: true
  - task:
      type: multimodal
    dataset:
      name: PercepTest
      type: percepTest
    metrics:
    - type: accuracy
      value: 70.5
      name: accuracy
      verified: true
  - task:
      type: multimodal
    dataset:
      name: LongVideoBench
      type: longvideobench
    metrics:
    - type: accuracy
      value: 58.3
      name: accuracy
      verified: true
  - task:
      type: multimodal
    dataset:
      name: VideoMME (wo sub)
      type: videomme
    metrics:
    - type: accuracy
      value: 57.0
      name: accuracy
      verified: true
  - task:
      type: multimodal
    dataset:
      name: LVBench
      type: lvbench
    metrics:
    - type: accuracy
      value: 42.9
      name: accuracy
      verified: true

---

# 🦜VideoChat-Flash-Qwen2_5-2B_res448⚡

[\[📰 Blog\]](https://internvideo.github.io/blog/2024-12-31-VideoChat-Flash) [\[📂 GitHub\]](https://github.com/OpenGVLab/VideoChat-Flash) [\[📜 Tech Report\]](https://www.arxiv.org/abs/2501.00574) [\[🗨️ Chat Demo\]](https://huggingface.co/spaces/OpenGVLab/VideoChat-Flash)

VideoChat-Flash-2B is built on UMT-L (300M) and Qwen2_5-2B and uses only **16 tokens per frame**. By leveraging YaRN to extend the context window to 128k (Qwen2's native context window is 32k), the model supports input sequences of up to approximately **10,000 frames**.

> Note: Due to a predominantly English training corpus, the model exhibits only basic Chinese comprehension. To ensure optimal performance, using English for interaction is recommended.


## 📈 Performance
| Model | MVBench | LongVideoBench | VideoMME (w/o sub) |
| --- | --- | --- | --- |
| [VideoChat-Flash-Qwen2_5-2B@448](https://huggingface.co/OpenGVLab/VideoChat-Flash-Qwen2_5-2B_res448) | 70.0 | 58.3 | 57.0 |
| [VideoChat-Flash-Qwen2-7B@224](https://huggingface.co/OpenGVLab/VideoChat-Flash-Qwen2-7B_res224) | 73.2 | 64.2 | 64.0 |
| [VideoChat-Flash-Qwen2-7B@448](https://huggingface.co/OpenGVLab/VideoChat-Flash-Qwen2-7B_res448) | 74.0 | 64.7 | 65.3 |

## 🚀 How to use the model


### Generation

We provide a simple generation pipeline for using our model. For more details, please refer to [GitHub](https://github.com/LLaVA-VL/LLaVA-NeXT).

```python
from transformers import AutoModel, AutoTokenizer

# model setting
model_path = 'OpenGVLab/VideoChat-Flash-Qwen2_5-2B_res448'

tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModel.from_pretrained(model_path, trust_remote_code=True).half().cuda()
image_processor = model.get_vision_tower().image_processor

mm_llm_compress = False  # whether to use global token compression inside the LLM
if mm_llm_compress:
    model.config.mm_llm_compress = True
    model.config.llm_compress_type = "uniform0_attention"
    model.config.llm_compress_layer_list = [4, 18]
    model.config.llm_image_token_ratio_list = [1, 0.75, 0.25]
else:
    model.config.mm_llm_compress = False

# evaluation setting
max_num_frames = 512
generation_config = dict(
    do_sample=False,
    temperature=0.0,
    max_new_tokens=1024,
    top_p=0.1,
    num_beams=1
)

video_path = "your_video.mp4"

# single-turn conversation
question1 = "Describe this video in detail."
output1, chat_history = model.chat(video_path=video_path, tokenizer=tokenizer, user_prompt=question1, return_history=True, max_num_frames=max_num_frames, generation_config=generation_config)

print(output1)

# multi-turn conversation
question2 = "How many people appear in the video?"
output2, chat_history = model.chat(video_path=video_path, tokenizer=tokenizer, user_prompt=question2, chat_history=chat_history, return_history=True, max_num_frames=max_num_frames, generation_config=generation_config)

print(output2)
```

## ✏️ Citation

```bibtex
@article{li2024videochatflash,
  title={VideoChat-Flash: Hierarchical Compression for Long-Context Video Modeling},
  author={Li, Xinhao and Wang, Yi and Yu, Jiashuo and Zeng, Xiangyu and Zhu, Yuhan and Huang, Haian and Gao, Jianfei and Li, Kunchang and He, Yinan and Wang, Chenting and others},
  journal={arXiv preprint arXiv:2501.00574},
  year={2024}
}
```
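A minimal sketch building on the generation example above (assumptions: the `model`, `tokenizer`, `max_num_frames`, and `generation_config` objects from that snippet are already in scope, and `model.chat` accepts `chat_history=None` for the first turn, which the README simply omits). It asks several questions about one video while carrying the chat history forward through the documented interface.

```python
# Ask a sequence of questions about one video, reusing the returned chat_history.
questions = [
    "Describe this video in detail.",
    "How many people appear in the video?",
    "What happens at the end of the video?",
]

chat_history = None  # assumption: None is accepted for the first turn
for q in questions:
    answer, chat_history = model.chat(
        video_path="your_video.mp4",
        tokenizer=tokenizer,
        user_prompt=q,
        chat_history=chat_history,
        return_history=True,
        max_num_frames=max_num_frames,
        generation_config=generation_config,
    )
    print(f"Q: {q}\nA: {answer}\n")
```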
added_tokens.json
ADDED
@@ -0,0 +1,24 @@
{
  "</tool_call>": 151658,
  "<tool_call>": 151657,
  "<|box_end|>": 151649,
  "<|box_start|>": 151648,
  "<|endoftext|>": 151643,
  "<|file_sep|>": 151664,
  "<|fim_middle|>": 151660,
  "<|fim_pad|>": 151662,
  "<|fim_prefix|>": 151659,
  "<|fim_suffix|>": 151661,
  "<|im_end|>": 151645,
  "<|im_start|>": 151644,
  "<|image_pad|>": 151655,
  "<|object_ref_end|>": 151647,
  "<|object_ref_start|>": 151646,
  "<|quad_end|>": 151651,
  "<|quad_start|>": 151650,
  "<|repo_name|>": 151663,
  "<|video_pad|>": 151656,
  "<|vision_end|>": 151653,
  "<|vision_pad|>": 151654,
  "<|vision_start|>": 151652
}
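A quick sanity-check sketch: the IDs above should match what the tokenizer shipped in this repo resolves for the Qwen2.5 control tokens.

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("OpenGVLab/VideoChat-Flash-Qwen2_5-2B_res448", trust_remote_code=True)
assert tok.convert_tokens_to_ids("<|im_start|>") == 151644
assert tok.convert_tokens_to_ids("<|im_end|>") == 151645
assert tok.convert_tokens_to_ids("<|endoftext|>") == 151643
```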
config.json
ADDED
@@ -0,0 +1,220 @@
{
  "architectures": [
    "VideoChatFlashQwenForCausalLM"
  ],
  "auto_map": {
    "AutoConfig": "modeling_videochat_flash.VideoChatFlashQwenConfig",
    "AutoModel": "modeling_videochat_flash.VideoChatFlashQwenForCausalLM"
  },
  "attention_dropout": 0.0,
  "bos_token_id": 151643,
  "eos_token_id": 151645,
  "frame_aspect_ratio": "square",
  "frame_grid_pinpoints": null,
  "hidden_act": "silu",
  "hidden_size": 1536,
  "image_aspect_ratio": "anyres_nopad",
  "image_crop_resolution": null,
  "image_grid_pinpoints": [
    [448, 448], [448, 896], [448, 1344], [448, 1792], [448, 2240], [448, 2688],
    [896, 448], [896, 896], [896, 1344], [896, 1792], [896, 2240], [896, 2688],
    [1344, 448], [1344, 896], [1344, 1344], [1344, 1792], [1344, 2240], [1344, 2688],
    [1792, 448], [1792, 896], [1792, 1344], [1792, 1792], [1792, 2240], [1792, 2688],
    [2240, 448], [2240, 896], [2240, 1344], [2240, 1792], [2240, 2240], [2240, 2688],
    [2688, 448], [2688, 896], [2688, 1344], [2688, 1792], [2688, 2240], [2688, 2688]
  ],
  "image_split_resolution": null,
  "initializer_range": 0.02,
  "intermediate_size": 8960,
  "llm_compress_layer_list": [24],
  "llm_compress_type": "attention",
  "llm_image_token_ratio_list": [1.0, 0.5],
  "max_num_pixels": 14745600000,
  "max_position_embeddings": 32768,
  "max_window_layers": 21,
  "min_slow_num_frames": 4,
  "mm_close_init": false,
  "mm_hidden_size": 1024,
  "mm_llm_compress": false,
  "mm_local_num_frames": 4,
  "mm_newline_position": "nothing",
  "mm_num_compress_latents": 128,
  "mm_num_compress_query_type": "learnable",
  "mm_patch_merge_type": "spatial_nopad",
  "mm_pos_num_frames": 8,
  "mm_projector_lr": null,
  "mm_projector_type": "tome16_mlp_hd64",
  "mm_resampler_type": null,
  "mm_spatial_pool_mode": "bilinear",
  "mm_tunable_parts": "mm_vision_tower,mm_mlp_adapter,mm_language_model",
  "mm_use_im_patch_token": false,
  "mm_use_im_start_end": false,
  "mm_vision_select_feature": "patch",
  "mm_vision_select_layer": -2,
  "mm_vision_tower": "umt-hd-large",
  "mm_vision_tower_lr": 2e-06,
  "model_type": "qwen2",
  "num_attention_heads": 12,
  "num_hidden_layers": 28,
  "num_key_value_heads": 2,
  "pos_skipping_range": 4096,
  "rms_norm_eps": 1e-06,
  "rope_scaling": null,
  "rope_theta": 1000000.0,
  "sliding_window": 32768,
  "tie_word_embeddings": true,
  "tokenizer_model_max_length": 32768,
  "tokenizer_padding_side": "right",
  "torch_dtype": "bfloat16",
  "transformers_version": "4.39.2",
  "use_cache": true,
  "use_mm_proj": true,
  "use_pos_skipping": false,
  "use_sliding_window": false,
  "vision_encode_type": "video_image",
  "vision_tower_pretrained": null,
  "vocab_size": 151936
}
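A small sketch showing how this config is loaded in practice: the `auto_map` entry routes `AutoConfig` to `modeling_videochat_flash.VideoChatFlashQwenConfig`, so `trust_remote_code=True` is required, and the compression fields below are the ones the README example overrides at runtime.

```python
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("OpenGVLab/VideoChat-Flash-Qwen2_5-2B_res448", trust_remote_code=True)
print(cfg.mm_llm_compress)             # False by default
print(cfg.llm_compress_type)           # "attention"
print(cfg.llm_compress_layer_list)     # [24]
print(cfg.llm_image_token_ratio_list)  # [1.0, 0.5]
```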
constants.py
ADDED
@@ -0,0 +1,12 @@
CONTROLLER_HEART_BEAT_EXPIRATION = 30
WORKER_HEART_BEAT_INTERVAL = 15

LOGDIR = "."

# Model Constants
IGNORE_INDEX = -100
IMAGE_TOKEN_INDEX = -200
DEFAULT_IMAGE_TOKEN = "<image>"
DEFAULT_IMAGE_PATCH_TOKEN = "<im_patch>"
DEFAULT_IM_START_TOKEN = "<im_start>"
DEFAULT_IM_END_TOKEN = "<im_end>"
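For context, a simplified sketch of how `IMAGE_TOKEN_INDEX` (-200) is conventionally used in LLaVA-style code: the `<image>` placeholder in the prompt is swapped for the sentinel id so visual features can later be spliced in at that position. This is the generic pattern, with a hypothetical helper name; this repo's own implementation lives in `mm_utils.py` / `modeling_videochat_flash.py` and may differ in detail.

```python
def tokenizer_image_token(prompt, tokenizer, image_token_index=-200):
    # Tokenize the text around each "<image>" placeholder separately,
    # then join the pieces with the sentinel id in between.
    chunks = [tokenizer(chunk).input_ids for chunk in prompt.split("<image>")]
    input_ids = chunks[0]
    for chunk in chunks[1:]:
        input_ids = input_ids + [image_token_index] + chunk
    return input_ids
```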
conversation.py
ADDED
@@ -0,0 +1,592 @@
import dataclasses
from enum import auto, Enum
from typing import List, Any, Dict, Union, Tuple
import re
import base64
from io import BytesIO
from PIL import Image
from transformers import AutoTokenizer


class SeparatorStyle(Enum):
    """Different separator style."""

    SINGLE = auto()
    TWO = auto()
    MPT = auto()
    PLAIN = auto()
    CHATML = auto()
    LLAMA_2 = auto()
    LLAMA_3 = auto()
    QWEN = auto()
    GEMMA = auto()


@dataclasses.dataclass
class Conversation:
    """A class that keeps all conversation history."""

    system: str
    roles: List[str]
    messages: List[List[str]]
    offset: int
    sep_style: SeparatorStyle = SeparatorStyle.SINGLE
    sep: str = "###"
    sep2: str = None
    version: str = "Unknown"

    tokenizer_id: str = ""
    tokenizer: Any = None
    # Stop criteria (the default one is EOS token)
    stop_str: Union[str, List[str]] = None
    # Stops generation if meeting any token in this list
    stop_token_ids: List[int] = None

    skip_next: bool = False

    def get_prompt(self):
        messages = self.messages
        if len(messages) > 0 and type(messages[0][1]) is tuple:
            messages = self.messages.copy()
            init_role, init_msg = messages[0].copy()
            init_msg = init_msg[0]
            if "mmtag" in self.version:
                init_msg = init_msg.replace("<image>", "").strip()
                messages[0] = (init_role, init_msg)
                messages.insert(0, (self.roles[0], "<Image><image></Image>"))
                messages.insert(1, (self.roles[1], "Received."))
            elif not init_msg.startswith("<image>"):
                init_msg = init_msg.replace("<image>", "").strip()
                messages[0] = (init_role, "<image>\n" + init_msg)
            else:
                messages[0] = (init_role, init_msg)

        if self.sep_style == SeparatorStyle.SINGLE:
            ret = self.system + self.sep
            for role, message in messages:
                if message:
                    if type(message) is tuple:
                        message, _, _ = message
                    ret += role + ": " + message + self.sep
                else:
                    ret += role + ":"

        elif self.sep_style == SeparatorStyle.TWO:
            seps = [self.sep, self.sep2]
            ret = self.system + seps[0]
            for i, (role, message) in enumerate(messages):
                if message:
                    if type(message) is tuple:
                        message, _, _ = message
                    ret += role + ": " + message + seps[i % 2]
                else:
                    ret += role + ":"

        elif self.sep_style == SeparatorStyle.CHATML:
            ret = "" if self.system == "" else self.system + self.sep + "\n"
            for role, message in messages:
                if message:
                    if type(message) is tuple:
                        message, images, _ = message
                        message = "<image>" * len(images) + message
                    ret += role + "\n" + message + self.sep + "\n"
                else:
                    ret += role + "\n"
            return ret

        elif self.sep_style == SeparatorStyle.LLAMA_3:
            chat_template_messages = [{"role": "system", "content": self.system}]
            for role, message in messages:
                if message:
                    if type(message) is tuple:
                        message, images = message
                        message = "<image>" * len(images) + message
                    chat_template_messages.append({"role": role, "content": message})

            # print(chat_template_messages)
            return self.tokenizer.apply_chat_template(chat_template_messages, tokenize=False, add_generation_prompt=True)
            # ret = "" if self.system == "" else self.system + self.sep + "\n"
            # for role, message in messages:
            #     if message:
            #         if type(message) is tuple:
            #             message, images = message
            #             message = "<image>" * len(images) + message
            #         ret += role + "\n" + message + self.sep + "\n"
            #     else:
            #         ret += role + "\n"
            # return ret

        elif self.sep_style == SeparatorStyle.MPT:
            ret = self.system + self.sep
            for role, message in messages:
                if message:
                    if type(message) is tuple:
                        message, _, _ = message
                    ret += role + message + self.sep
                else:
                    ret += role

        elif self.sep_style == SeparatorStyle.GEMMA:
            ret = ""
            for i, (role, message) in enumerate(messages):
                assert role == self.roles[i % 2], "Conversation should alternate user/assistant/user/assistant/..."
                if message:
                    if type(message) is tuple:
                        message, _, _ = message
                    ret += role + message + self.sep
                else:
                    ret += role

        elif self.sep_style == SeparatorStyle.LLAMA_2:
            wrap_sys = lambda msg: f"<<SYS>>\n{msg}\n<</SYS>>\n\n" if len(msg) > 0 else msg
            wrap_inst = lambda msg: f"[INST] {msg} [/INST]"
            ret = ""

            for i, (role, message) in enumerate(messages):
                if i == 0:
                    assert message, "first message should not be none"
                    assert role == self.roles[0], "first message should come from user"
                if message:
                    if type(message) is tuple:
                        message, _, _ = message
                    if i == 0:
                        message = wrap_sys(self.system) + message
                    if i % 2 == 0:
                        message = wrap_inst(message)
                        ret += self.sep + message
                    else:
                        ret += " " + message + " " + self.sep2
                else:
                    ret += ""
            ret = ret.lstrip(self.sep)

        elif self.sep_style == SeparatorStyle.PLAIN:
            seps = [self.sep, self.sep2]
            ret = self.system
            for i, (role, message) in enumerate(messages):
                if message:
                    if type(message) is tuple:
                        message, _, _ = message
                    ret += message + seps[i % 2]
                else:
                    ret += ""
        else:
            raise ValueError(f"Invalid style: {self.sep_style}")

        return ret

    def append_message(self, role, message):
        self.messages.append([role, message])

    def process_image(self, image, image_process_mode, return_pil=False, image_format="PNG"):
        if image_process_mode == "Pad":

            def expand2square(pil_img, background_color=(122, 116, 104)):
                width, height = pil_img.size
                if width == height:
                    return pil_img
                elif width > height:
                    result = Image.new(pil_img.mode, (width, width), background_color)
                    result.paste(pil_img, (0, (width - height) // 2))
                    return result
                else:
                    result = Image.new(pil_img.mode, (height, height), background_color)
                    result.paste(pil_img, ((height - width) // 2, 0))
                    return result

            image = expand2square(image)
        elif image_process_mode in ["Default", "Crop"]:
            pass
        elif image_process_mode == "Resize":
            image = image.resize((336, 336))
        else:
            raise ValueError(f"Invalid image_process_mode: {image_process_mode}")

        if type(image) is not Image.Image:
            image = Image.open(image).convert("RGB")

        max_hw, min_hw = max(image.size), min(image.size)
        aspect_ratio = max_hw / min_hw
        max_len, min_len = 672, 448
        shortest_edge = int(min(max_len / aspect_ratio, min_len, min_hw))
        longest_edge = int(shortest_edge * aspect_ratio)
        W, H = image.size
        if H > W:
            H, W = longest_edge, shortest_edge
        else:
            H, W = shortest_edge, longest_edge
        image = image.resize((W, H))
        if return_pil:
            return image
        else:
            buffered = BytesIO()
            image.save(buffered, format=image_format)
            img_b64_str = base64.b64encode(buffered.getvalue()).decode()
            return img_b64_str

    def get_images(self, return_pil=False, return_path=False):
        images = []
        for i, (role, msg) in enumerate(self.messages[self.offset :]):
            if i % 2 == 0:
                if type(msg) is tuple:
                    msg, image, image_process_mode = msg
                    if type(image) != list:
                        image = [image]
                    for img in image:
                        if not return_path and self.is_image_file(img):
                            img = self.process_image(img, image_process_mode, return_pil=return_pil)
                        else:
                            images.append(img)
        return images

    def is_image_file(self, filename):
        image_extensions = [".png", ".jpg", ".jpeg", ".gif", ".bmp", ".tiff", ".webp"]
        return any(filename.lower().endswith(ext) for ext in image_extensions)

    def is_video_file(self, filename):
        video_extensions = [".mp4", ".mov", ".avi", ".mkv", ".wmv", ".flv", ".mpeg", ".mpg"]
        return any(filename.lower().endswith(ext) for ext in video_extensions)

    def to_gradio_chatbot(self):
        ret = []
        for i, (role, msg) in enumerate(self.messages[self.offset :]):
            if i % 2 == 0:
                if type(msg) is tuple:
                    msg, image, image_process_mode = msg
                    if type(image) != list:
                        image = [image]
                    if len(image) == 1:
                        msg = "<image>\n" + msg.replace("<image>", "").strip()
                    else:
                        msg = re.sub(r"(<image>)\n(?=<image>)", r"\1 ", msg)

                    img_str_list = []
                    for img in image:
                        if self.is_image_file(img):
                            img_b64_str = self.process_image(img, "Default", return_pil=False, image_format="JPEG")
                            img_str = f'<img src="data:image/jpeg;base64,{img_b64_str}" style="max-width: 256px; max-height: 256px; width: auto; height: auto; object-fit: contain;"/>'
                            img_str_list.append(img_str)
                        elif self.is_video_file(img):
                            ret.append(((img,), None))

                    msg = msg.strip()
                    img_place_holder = ""
                    for img_str in img_str_list:
                        img_place_holder += f"{img_str}\n\n"

                    if len(img_str_list) > 0:
                        msg = f"{img_place_holder}\n\n{msg}"

                    if len(msg) > 0:
                        ret.append([msg, None])
                else:
                    ret.append([msg, None])
            else:
                ret[-1][-1] = msg
        return ret

    def copy(self):
        return Conversation(system=self.system, roles=self.roles, messages=[[x, y] for x, y in self.messages], offset=self.offset, sep_style=self.sep_style, sep=self.sep, sep2=self.sep2, version=self.version)

    def dict(self):
        if len(self.get_images()) > 0:
            return {
                "system": self.system,
                "roles": self.roles,
                "messages": [[x, y[0] if type(y) is tuple else y] for x, y in self.messages],
                "offset": self.offset,
                "sep": self.sep,
                "sep2": self.sep2,
            }
        return {
            "system": self.system,
            "roles": self.roles,
            "messages": self.messages,
            "offset": self.offset,
            "sep": self.sep,
            "sep2": self.sep2,
        }


conv_vicuna_v0 = Conversation(
    system="A chat between a curious human and an artificial intelligence assistant. " "The assistant gives helpful, detailed, and polite answers to the human's questions.",
    roles=("Human", "Assistant"),
    messages=[
        ["Human", "What are the key differences between renewable and non-renewable energy sources?"],
        [
            "Assistant",
            "Renewable energy sources are those that can be replenished naturally in a relatively "
            "short amount of time, such as solar, wind, hydro, geothermal, and biomass. "
            "Non-renewable energy sources, on the other hand, are finite and will eventually be "
            "depleted, such as coal, oil, and natural gas. Here are some key differences between "
            "renewable and non-renewable energy sources:\n"
            "1. Availability: Renewable energy sources are virtually inexhaustible, while non-renewable "
            "energy sources are finite and will eventually run out.\n"
            "2. Environmental impact: Renewable energy sources have a much lower environmental impact "
            "than non-renewable sources, which can lead to air and water pollution, greenhouse gas emissions, "
            "and other negative effects.\n"
            "3. Cost: Renewable energy sources can be more expensive to initially set up, but they typically "
            "have lower operational costs than non-renewable sources.\n"
            "4. Reliability: Renewable energy sources are often more reliable and can be used in more remote "
            "locations than non-renewable sources.\n"
            "5. Flexibility: Renewable energy sources are often more flexible and can be adapted to different "
            "situations and needs, while non-renewable sources are more rigid and inflexible.\n"
            "6. Sustainability: Renewable energy sources are more sustainable over the long term, while "
            "non-renewable sources are not, and their depletion can lead to economic and social instability.\n",
        ],
    ],
    offset=2,
    sep_style=SeparatorStyle.SINGLE,
    sep="###",
)

conv_vicuna_v1 = Conversation(
    system="A chat between a curious user and an artificial intelligence assistant. " "The assistant gives helpful, detailed, and polite answers to the user's questions.",
    roles=("USER", "ASSISTANT"),
    version="v1",
    messages=[],
    offset=0,
    sep_style=SeparatorStyle.TWO,
    sep=" ",
    sep2="</s>",
)

conv_llama_2 = Conversation(
    system="""You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.

If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.""",
    roles=("USER", "ASSISTANT"),
    version="llama_v2",
    messages=[],
    offset=0,
    sep_style=SeparatorStyle.LLAMA_2,
    sep="<s>",
    sep2="</s>",
)

conv_llava_llama_2 = Conversation(
    system="You are a helpful language and vision assistant. " "You are able to understand the visual content that the user provides, " "and assist the user with a variety of tasks using natural language.",
    roles=("USER", "ASSISTANT"),
    version="llama_v2",
    messages=[],
    offset=0,
    sep_style=SeparatorStyle.LLAMA_2,
    sep="<s>",
    sep2="</s>",
)

# conv_llava_llama_3 = Conversation(
#     system="You are a helpful language and vision assistant. " "You are able to understand the visual content that the user provides, " "and assist the user with a variety of tasks using natural language.",
#     roles=("user", "assistant"),
#     version="llama_v3",
#     messages=[],
#     offset=0,
#     sep="<|eot_id|>",
#     sep_style=SeparatorStyle.LLAMA_3,
#     tokenizer_id="meta-llama/Meta-Llama-3-8B-Instruct",
#     tokenizer=AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct"),
#     stop_token_ids=[128009],
# )

conv_mistral_instruct = Conversation(
    system="",
    roles=("USER", "ASSISTANT"),
    version="llama_v2",
    messages=[],
    offset=0,
    sep_style=SeparatorStyle.LLAMA_2,
    sep="",
    sep2="</s>",
)

conv_llava_llama_2_simple = Conversation(
    system="Answer the questions about the visual content that the user provides.",
    roles=("USER", "ASSISTANT"),
    version="llama_v2",
    messages=[],
    offset=0,
    sep_style=SeparatorStyle.LLAMA_2,
    sep="<s>",
    sep2="</s>",
)

conv_llava_llama_2_mmtag = Conversation(
    system="Answer the questions about the visual content that the user provides." "The visual content will be provided with the following format: <Image>visual content</Image>.",
    roles=("USER", "ASSISTANT"),
    version="llama_v2_mmtag",
    messages=[],
    offset=0,
    sep_style=SeparatorStyle.LLAMA_2,
    sep="<s>",
    sep2="</s>",
)

conv_mpt = Conversation(
    system="""<|im_start|>system
A conversation between a user and an LLM-based AI assistant. The assistant gives helpful and honest answers.""",
    roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
    version="mpt",
    messages=[],
    offset=0,
    sep_style=SeparatorStyle.MPT,
    sep="<|im_end|>",
)

conv_qwen = Conversation(
    system="""<|im_start|>system
You are a helpful assistant.""",
    roles=("<|im_start|>user", "<|im_start|>assistant"),
    version="qwen",
    messages=[],
    offset=0,
    sep_style=SeparatorStyle.CHATML,
    sep="<|im_end|>",
)


conv_internlm_2 = Conversation(
    system="""<|im_start|>system
You are a helpful assistant.""",
    roles=("<|im_start|>user", "<|im_start|>assistant"),
    version="internlm_2",
    messages=[],
    offset=0,
    sep_style=SeparatorStyle.CHATML,
    sep="<|im_end|>",
)

conv_gemma_instruct = Conversation(system="", roles=("<start_of_turn>user\n", "<start_of_turn>model\n"), version="gemma", messages=[], offset=0, sep_style=SeparatorStyle.GEMMA, sep="<end_of_turn>\n")

conv_llava_plain = Conversation(
    system="",
    roles=("", ""),
    messages=[],
    offset=0,
    sep_style=SeparatorStyle.PLAIN,
    sep="\n",
)

conv_llava_v0 = Conversation(
    system="A chat between a curious human and an artificial intelligence assistant. " "The assistant gives helpful, detailed, and polite answers to the human's questions.",
    roles=("Human", "Assistant"),
    messages=[],
    offset=0,
    sep_style=SeparatorStyle.SINGLE,
    sep="###",
)

conv_llava_v0_mmtag = Conversation(
    system="A chat between a curious user and an artificial intelligence assistant. "
    "The assistant is able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language."
    "The visual content will be provided with the following format: <Image>visual content</Image>.",
    roles=("Human", "Assistant"),
    messages=[],
    offset=0,
    sep_style=SeparatorStyle.SINGLE,
    sep="###",
    version="v0_mmtag",
)

conv_llava_v1 = Conversation(
    system="A chat between a curious human and an artificial intelligence assistant. " "The assistant gives helpful, detailed, and polite answers to the human's questions.",
    roles=("USER", "ASSISTANT"),
    version="v1",
    messages=[],
    offset=0,
    sep_style=SeparatorStyle.TWO,
    sep=" ",
    sep2="</s>",
)

conv_llava_v1_mmtag = Conversation(
    system="A chat between a curious user and an artificial intelligence assistant. "
    "The assistant is able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language."
    "The visual content will be provided with the following format: <Image>visual content</Image>.",
    roles=("USER", "ASSISTANT"),
    messages=[],
    offset=0,
    sep_style=SeparatorStyle.TWO,
    sep=" ",
    sep2="</s>",
    version="v1_mmtag",
)

conv_mistral_orca = Conversation(
    system="""<|im_start|>system
You are MistralOrca, a large language model trained by Alignment Lab AI. Write out your reasoning step-by-step to be sure you get the right answers!""",
    roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
    version="mpt",
    messages=[],
    offset=0,
    sep_style=SeparatorStyle.MPT,
    sep="<|im_end|>",
)

conv_mistral_zephyr = Conversation(
    system="""<|system|>
You are a helpful AI assistant.""",
    roles=("<|user|>\n", "<|assistant|>\n"),
    version="mpt",
    messages=[],
    offset=0,
    sep_style=SeparatorStyle.MPT,
    sep="</s>",
)

conv_mistral_direct = Conversation(
    system="""<|im_start|>system
Answer the questions.""",
    roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
    version="mpt",
    messages=[],
    offset=0,
    sep_style=SeparatorStyle.MPT,
    sep="<|im_end|>",
)

conv_chatml_direct = Conversation(
    system="""<|im_start|>system
Answer the questions.""",
    roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
    version="mpt",
    messages=[],
    offset=0,
    sep_style=SeparatorStyle.MPT,
    sep="<|im_end|>",
)

default_conversation = conv_vicuna_v0
conv_templates = {
    "default": conv_vicuna_v0,
    "v0": conv_vicuna_v0,
    "v1": conv_vicuna_v1,
    "vicuna_v1": conv_vicuna_v1,
    "llama_2": conv_llama_2,
    "mistral_instruct": conv_mistral_instruct,
    "mistral_orca": conv_mistral_orca,
    "mistral_zephyr": conv_mistral_zephyr,
    "mistral_direct": conv_mistral_direct,
    "plain": conv_llava_plain,
    "v0_plain": conv_llava_plain,
    "chatml_direct": conv_chatml_direct,
    "llava_v0": conv_llava_v0,
    "llava_v0_mmtag": conv_llava_v0_mmtag,
    "llava_v1": conv_llava_v1,
    "llava_v1_mmtag": conv_llava_v1_mmtag,
    "llava_llama_2": conv_llava_llama_2,
    # "llava_llama_3": conv_llava_llama_3,
    "llava_llama_2_simple": conv_llava_llama_2_simple,
    "llava_llama_2_mmtag": conv_llava_llama_2_mmtag,
    "llava_mistral_instruct": conv_mistral_instruct,
    "mpt": conv_mpt,
    "qwen_1_5": conv_qwen,
    "qwen_2": conv_qwen,
    "internlm_2": conv_internlm_2,
    "gemma_instruct": conv_gemma_instruct,
}


if __name__ == "__main__":
    print(default_conversation.get_prompt())
    print(default_conversation)
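A usage sketch for the templates above, assuming the file is importable as a top-level `conversation` module: build a ChatML-style prompt with the Qwen template (`copy()` avoids mutating the shared module-level object).

```python
from conversation import conv_templates

conv = conv_templates["qwen_2"].copy()
conv.append_message(conv.roles[0], "<image>\nDescribe this video in detail.")
conv.append_message(conv.roles[1], None)  # leave the assistant slot open for generation
print(conv.get_prompt())
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# <image>
# Describe this video in detail.<|im_end|>
# <|im_start|>assistant
```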
generation_config.json
ADDED
@@ -0,0 +1,14 @@
{
  "bos_token_id": 151643,
  "do_sample": true,
  "eos_token_id": [
    151645,
    151643
  ],
  "pad_token_id": 151643,
  "repetition_penalty": 1.1,
  "temperature": 0.7,
  "top_k": 20,
  "top_p": 0.8,
  "transformers_version": "4.39.2"
}
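A short sketch for context: these are the sampling defaults that `generate()` picks up when no explicit arguments are passed; the README example instead passes `do_sample=False` (greedy decoding), which overrides them.

```python
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("OpenGVLab/VideoChat-Flash-Qwen2_5-2B_res448")
print(gen_cfg.do_sample, gen_cfg.temperature, gen_cfg.top_p, gen_cfg.top_k)  # True 0.7 0.8 20
```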
merges.txt
ADDED
The diff for this file is too large to render.
mm_projector_builder.py
ADDED
@@ -0,0 +1,165 @@
import torch
import torch.nn as nn
from typing import Callable, Tuple


def bipartite_soft_matching(
    metric: torch.Tensor,
    r: int,
) -> Tuple[Callable, Callable]:
    """
    Applies ToMe with a balanced matching set (50%, 50%).

    Input size is [batch, tokens, channels].
    r indicates the number of tokens to remove (max 50% of tokens).
    """
    protected = 0

    t = metric.shape[1]
    r = min(r, (t - protected) // 2)

    assert r > 0, r

    with torch.no_grad():
        metric = metric / metric.norm(dim=-1, keepdim=True)
        a, b = metric[..., ::2, :], metric[..., 1::2, :]
        scores = a @ b.transpose(-1, -2)

        node_max, node_idx = scores.max(dim=-1)
        edge_idx = node_max.argsort(dim=-1, descending=True)[..., None]

        unm_idx = edge_idx[..., r:, :]  # Unmerged Tokens
        src_idx = edge_idx[..., :r, :]  # Merged Tokens
        dst_idx = node_idx[..., None].gather(dim=-2, index=src_idx)

    def merge(x: torch.Tensor, mode="mean") -> torch.Tensor:
        src, dst = x[..., ::2, :], x[..., 1::2, :]
        n, t1, c = src.shape
        unm = src.gather(dim=-2, index=unm_idx.expand(n, t1 - r, c))
        src = src.gather(dim=-2, index=src_idx.expand(n, r, c))
        dst = dst.scatter_add(-2, dst_idx.expand(n, r, c), src)  # , reduce=mode)

        return torch.cat([unm, dst], dim=1)

    def unmerge(x: torch.Tensor) -> torch.Tensor:
        unm_len = unm_idx.shape[1]
        unm, dst = x[..., :unm_len, :], x[..., unm_len:, :]
        n, _, c = unm.shape

        src = dst.gather(dim=-2, index=dst_idx.expand(n, r, c))

        out = torch.zeros(n, metric.shape[1], c, device=x.device, dtype=x.dtype)

        out[..., 1::2, :] = dst
        out.scatter_(dim=-2, index=(2 * unm_idx).expand(n, unm_len, c), src=unm)
        out.scatter_(dim=-2, index=(2 * src_idx).expand(n, r, c), src=src)

        return out

    return merge, unmerge


def merge_wavg(
    merge: Callable, x: torch.Tensor, size: torch.Tensor = None
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Applies the merge function by taking a weighted average based on token size.
    Returns the merged tensor and the new token sizes.
    """
    if size is None:
        size = torch.ones_like(x[..., 0, None])

    x = merge(x * size, mode="sum")
    size = merge(size, mode="sum")

    x = x / size
    return x, size


class ToMe16_mlp_hd64(nn.Module):
    def __init__(self, config, vision_cfg):
        super().__init__()
        self._config = config
        self.mm_hidden_size = config.mm_hidden_size
        self.hw = vision_cfg.image_size // vision_cfg.patch_size
        self.num_attention_heads = vision_cfg.num_attention_heads
        self.mlp = nn.Sequential(nn.Linear(config.mm_hidden_size, config.hidden_size),
                                 nn.GELU(),
                                 nn.Linear(config.hidden_size, config.hidden_size))
        self.max_pos_hw = self.hw
        self.max_pos_num_frames = config.mm_pos_num_frames
        self.num_image_patches_per_side = 8
        self.num_frame_patches_per_side = 4

    def merge_tokens(self, x, target_num_token):
        r"""
        x = torch.randn(10, 2560, c)
        x = merge_tokens(x, r_merge_list=[1280])
        """
        size = None
        b, p, c = x.shape
        tmp_p = p
        r_merge_list = []
        assert tmp_p > target_num_token, f"{tmp_p} should be greater than {target_num_token}"
        while tmp_p != target_num_token:
            if tmp_p - target_num_token <= (tmp_p // 2):
                r_merge_list.append(tmp_p - target_num_token)
                break
            else:
                r_merge_list.append(tmp_p // 2)
                tmp_p = tmp_p - (tmp_p // 2)

        head = self.num_attention_heads

        dim = c // head
        for r in r_merge_list:
            metric = x.reshape(b, p, head, dim).mean(2)  # [b, p, c//head]
            merge, _ = bipartite_soft_matching(
                metric,
                r
            )
            x, size = merge_wavg(merge, x, size)
            _, p, _ = x.shape

        return x

    def forward(self, x, compress=False, local_num_frames=-1):  # 64 tokens per single frame
        height = width = self.hw
        assert height * width == x.shape[1]

        if local_num_frames != -1 and local_num_frames != 1:
            assert compress is True
        if compress:
            if local_num_frames != -1:
                num_frames = local_num_frames
                x = x.reshape(x.shape[0] // local_num_frames, -1, x.shape[-1])
            else:
                num_frames = x.shape[0]
                x = x.reshape(1, -1, x.shape[-1])
            num_tome_tokens = 16 * num_frames
        else:
            num_tome_tokens = 64

        x = self.merge_tokens(x, target_num_token=num_tome_tokens)
        x = self.mlp(x)
        return x

    @property
    def config(self):
        return {"mm_projector_type": "tome16_mlp_hd64"}


def build_vision_projector(config, delay_load=False, **kwargs):
    projector_type = getattr(config, "mm_projector_type", "linear")

    if projector_type == 'tome16_mlp_hd64':
        return ToMe16_mlp_hd64(config, kwargs["vision_cfg"])

    raise ValueError(f"Unknown projector type: {projector_type}")
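A standalone sketch exercising the ToMe primitives defined in this file (run in the same module or after importing `bipartite_soft_matching` and `merge_wavg`): one balanced matching step that merges a sequence of 256 tokens down to 128, using the token features themselves as the matching metric, as `merge_tokens` does internally.

```python
import torch

x = torch.randn(2, 256, 64)                   # [batch, tokens, channels]
merge, _ = bipartite_soft_matching(x, r=128)  # remove 128 tokens (half the sequence)
x_merged, sizes = merge_wavg(merge, x)        # size-weighted average of merged tokens
print(x_merged.shape)                         # torch.Size([2, 128, 64])
```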
mm_utils.py
ADDED
@@ -0,0 +1,855 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from PIL import Image
|
2 |
+
from io import BytesIO
|
3 |
+
import base64
|
4 |
+
import math
|
5 |
+
import ast
|
6 |
+
import re
|
7 |
+
import torch
|
8 |
+
from transformers import StoppingCriteria
|
9 |
+
from .constants import IMAGE_TOKEN_INDEX
|
10 |
+
import random
|
11 |
+
import os
|
12 |
+
import io
|
13 |
+
import av
|
14 |
+
import cv2
|
15 |
+
import imageio
|
16 |
+
from decord import VideoReader
|
17 |
+
import numpy as np
|
18 |
+
|
19 |
+
|
20 |
+
|
21 |
+
######################## load video ########################
|
22 |
+
|
23 |
+
def get_index(num_frames, num_segments):
|
24 |
+
seg_size = float(num_frames - 1) / num_segments
|
25 |
+
start = int(seg_size / 2)
|
26 |
+
offsets = np.array([
|
27 |
+
start + int(np.round(seg_size * idx)) for idx in range(num_segments)
|
28 |
+
])
|
29 |
+
return offsets
|
30 |
+
|
31 |
+
|
32 |
+
def pts_to_secs(pts: int, time_base: float, start_pts: int) -> float:
|
33 |
+
"""
|
34 |
+
Converts a present time with the given time base and start_pts offset to seconds.
|
35 |
+
|
36 |
+
Returns:
|
37 |
+
time_in_seconds (float): The corresponding time in seconds.
|
38 |
+
|
39 |
+
https://github.com/facebookresearch/pytorchvideo/blob/main/pytorchvideo/data/utils.py#L54-L64
|
40 |
+
"""
|
41 |
+
if pts == math.inf:
|
42 |
+
return math.inf
|
43 |
+
|
44 |
+
return int(pts - start_pts) * time_base
|
45 |
+
|
46 |
+
|
47 |
+
def get_pyav_video_duration(video_reader):
|
48 |
+
video_stream = video_reader.streams.video[0]
|
49 |
+
video_duration = pts_to_secs(
|
50 |
+
video_stream.duration,
|
51 |
+
video_stream.time_base,
|
52 |
+
video_stream.start_time
|
53 |
+
)
|
54 |
+
return float(video_duration)
|
55 |
+
|
56 |
+
|
57 |
+
|
58 |
+
def get_frame_indices(num_frames, vlen, sample='middle', fix_start=None, input_fps=1, min_num_frames=1, max_num_frames=-1, local_num_frames=8):
|
59 |
+
|
60 |
+
if min_num_frames > vlen:
|
61 |
+
if sample == 'dynamic_fps1':
|
62 |
+
min_num_frames = (vlen // local_num_frames) * local_num_frames
|
63 |
+
else:
|
64 |
+
min_num_frames = vlen
|
65 |
+
|
66 |
+
|
67 |
+
if sample == 'dynamic_fps1':
|
68 |
+
|
69 |
+
duration = float(vlen) / input_fps
|
70 |
+
num_segments = int(duration // local_num_frames)
|
71 |
+
if num_segments == 0:
|
72 |
+
num_frames = local_num_frames
|
73 |
+
else:
|
74 |
+
num_frames = local_num_frames * num_segments
|
75 |
+
|
76 |
+
if max_num_frames > 0:
|
77 |
+
num_frames = min(num_frames, max_num_frames)
|
78 |
+
sample = "middle" # NOTE
|
79 |
+
|
80 |
+
# logger.info(f"? is OK (img), duation={duration} frames={num_frames}!!!!")
|
81 |
+
|
82 |
+
num_frames = max(min_num_frames, num_frames)
|
83 |
+
|
84 |
+
# print(f"\033[0;31m vlen={vlen}, input_fps={input_fps} num_frames={num_frames} \033[0m")
|
85 |
+
|
86 |
+
if sample in ["rand", "middle"]: # uniform sampling
|
87 |
+
acc_samples = min(num_frames, vlen)
|
88 |
+
# split the video into `acc_samples` intervals, and sample from each interval.
|
89 |
+
intervals = np.linspace(start=0, stop=vlen, num=acc_samples + 1).astype(int)
|
90 |
+
ranges = []
|
91 |
+
for idx, interv in enumerate(intervals[:-1]):
|
92 |
+
ranges.append((interv, intervals[idx + 1] - 1))
|
93 |
+
if sample == 'rand':
|
94 |
+
try:
|
95 |
+
frame_indices = [random.choice(range(x[0], x[1])) for x in ranges]
|
96 |
+
except:
|
97 |
+
frame_indices = np.random.permutation(vlen)[:acc_samples]
|
98 |
+
frame_indices.sort()
|
99 |
+
frame_indices = list(frame_indices)
|
100 |
+
elif fix_start is not None:
|
101 |
+
frame_indices = [x[0] + fix_start for x in ranges]
|
102 |
+
elif sample == 'middle':
|
103 |
+
frame_indices = [(x[0] + x[1]) // 2 for x in ranges]
|
104 |
+
else:
|
105 |
+
raise NotImplementedError
|
106 |
+
|
107 |
+
if len(frame_indices) < num_frames: # padded with last frame
|
108 |
+
padded_frame_indices = [frame_indices[-1]] * num_frames
|
109 |
+
padded_frame_indices[:len(frame_indices)] = frame_indices
|
110 |
+
frame_indices = padded_frame_indices
|
111 |
+
elif "fps" in sample: # fps0.5, sequentially sample frames at 0.5 fps
|
112 |
+
output_fps = float(sample[3:])
|
113 |
+
duration = float(vlen) / input_fps
|
114 |
+
delta = 1 / output_fps # gap between frames, this is also the clip length each frame represents
|
115 |
+
frame_seconds = np.arange(0 + delta / 2, duration + delta / 2, delta)
|
116 |
+
frame_indices = np.around(frame_seconds * input_fps).astype(int)
|
117 |
+
frame_indices = [e for e in frame_indices if e < vlen]
|
118 |
+
if max_num_frames > 0 and len(frame_indices) > max_num_frames:
|
119 |
+
frame_indices = frame_indices[:max_num_frames]
|
120 |
+
# frame_indices = np.linspace(0 + delta / 2, duration + delta / 2, endpoint=False, num=max_num_frames)
|
121 |
+
else:
|
122 |
+
raise ValueError(f"Not support sample type: {sample}")
|
123 |
+
|
124 |
+
|
125 |
+
return frame_indices
|
126 |
+
|
127 |
+
|
128 |
+
def read_frames_av(video_path, num_frames, sample='rand', client=None, fix_start=None, min_num_frames=1, max_num_frames=-1, clip=None, local_num_frames=8):
|
129 |
+
if clip is not None:
|
130 |
+
raise NotImplementedError("av don't support clip!!!")
|
131 |
+
if 's3://' in video_path:
|
132 |
+
video_bytes = client.get(video_path)
|
133 |
+
byteio = io.BytesIO(video_bytes)
|
134 |
+
byteio.seek(0)
|
135 |
+
reader = av.open(byteio)
|
136 |
+
else:
|
137 |
+
byteio = None
|
138 |
+
reader = av.open(video_path)
|
139 |
+
frames = [f.to_rgb().to_ndarray() for f in reader.decode(video=0)]
|
140 |
+
vlen = len(frames)
|
141 |
+
duration = get_pyav_video_duration(reader)
|
142 |
+
fps = vlen / float(duration)
|
143 |
+
frame_indices = get_frame_indices(
|
144 |
+
num_frames, vlen, sample=sample, fix_start=fix_start,
|
145 |
+
input_fps=fps, min_num_frames=min_num_frames, max_num_frames=max_num_frames, local_num_frames=local_num_frames
|
146 |
+
)
|
147 |
+
frames = np.stack([frames[idx] for idx in frame_indices]) # (T, H, W, C), torch.uint8
|
148 |
+
# frames = frames.permute(0, 3, 1, 2) # (T, C, H, W), torch.uint8
|
149 |
+
if byteio != None:
|
150 |
+
byteio.close()
|
151 |
+
|
152 |
+
reader.close()
|
153 |
+
|
154 |
+
return frames, frame_indices, float(fps), duration
|
155 |
+
|
156 |
+
|
157 |
+
def read_frames_gif(
|
158 |
+
video_path, num_frames, sample='rand', fix_start=None,
|
159 |
+
min_num_frames=1, max_num_frames=-1, client=None, clip=None, local_num_frames=8
|
160 |
+
):
|
161 |
+
if clip is not None:
|
162 |
+
raise NotImplementedError("Gif don't support clip!!!")
|
163 |
+
if 's3://' in video_path:
|
164 |
+
video_bytes = client.get(video_path)
|
165 |
+
byteio = io.BytesIO(video_bytes)
|
166 |
+
gif = imageio.get_reader(byteio)
|
167 |
+
else:
|
168 |
+
byteio = None
|
169 |
+
gif = imageio.get_reader(video_path)
|
170 |
+
vlen = len(gif)
|
171 |
+
fps = 1.
|
172 |
+
duration = vlen / fps
|
173 |
+
frame_indices = get_frame_indices(
|
174 |
+
num_frames, vlen, sample=sample, fix_start=fix_start,
|
175 |
+
min_num_frames=min_num_frames,
|
176 |
+
max_num_frames=max_num_frames, local_num_frames=local_num_frames,
|
177 |
+
input_fps=fps
|
178 |
+
)
|
179 |
+
frames = []
|
180 |
+
|
181 |
+
min_h = min_w = 100000
|
182 |
+
hw_set = set()
|
183 |
+
for index, frame in enumerate(gif):
|
184 |
+
# for index in frame_idxs:
|
185 |
+
if index in frame_indices:
|
186 |
+
frame = cv2.cvtColor(frame, cv2.COLOR_RGBA2RGB)
|
187 |
+
frame = frame.astype(np.uint8)
|
188 |
+
# # (H x W x C) to (C x H x W)
|
189 |
+
# frame = frame.permute(2, 0, 1)
|
190 |
+
frames.append(frame)
|
191 |
+
hw_set.add(frame.shape)
|
192 |
+
if frame.shape[0] < min_h:
|
193 |
+
min_h = frame.shape[0]
|
194 |
+
if frame.shape[1] < min_w:
|
195 |
+
min_w = frame.shape[1]
|
196 |
+
# print(hw_set, min_h, min_w)
|
197 |
+
if len(hw_set) > 1:
|
198 |
+
frames = [i[:min_h, :min_w] for i in frames]
|
199 |
+
|
200 |
+
frames = np.stack(frames) # .float() / 255
|
201 |
+
|
202 |
+
if byteio is not None:
|
203 |
+
byteio.close()
|
204 |
+
|
205 |
+
return frames, frame_indices, float(fps), duration # for tgif
|
206 |
+
|
207 |
+
|
208 |
+
|
209 |
+
def read_frames_decord(
|
210 |
+
video_path, num_frames, sample='rand', fix_start=None, min_num_frames=1,
|
211 |
+
max_num_frames=-1, client=None, clip=None, local_num_frames=8
|
212 |
+
):
|
213 |
+
|
214 |
+
if video_path.endswith('.avi'):
|
215 |
+
return read_frames_av(video_path=video_path, num_frames=num_frames, sample=sample,
|
216 |
+
fix_start=fix_start, min_num_frames=min_num_frames, max_num_frames=max_num_frames,
|
217 |
+
client=client, clip=clip, local_num_frames=local_num_frames)
|
218 |
+
if 's3://' in video_path:
|
219 |
+
video_bytes = client.get(video_path)
|
220 |
+
if video_bytes is None or len(video_bytes) == 0:
|
221 |
+
raise ValueError(f"Can't read byte from {video_path}!")
|
222 |
+
byteio = io.BytesIO(video_bytes)
|
223 |
+
video_reader = VideoReader(byteio, num_threads=1)
|
224 |
+
else:
|
225 |
+
byteio = None
|
226 |
+
video_reader = VideoReader(video_path, num_threads=1)
|
227 |
+
vlen = len(video_reader)
|
228 |
+
fps = video_reader.get_avg_fps()
|
229 |
+
duration = vlen / float(fps)
|
230 |
+
|
231 |
+
|
232 |
+
if clip:
|
233 |
+
start, end = clip
|
234 |
+
start = max(0, start)
|
235 |
+
end = min(duration - 0.1, end)
|
236 |
+
duration = end - start
|
237 |
+
vlen = int(duration * fps)
|
238 |
+
start_index = int(start * fps)
|
239 |
+
|
240 |
+
frame_indices = get_frame_indices(
|
241 |
+
num_frames, vlen, sample=sample, fix_start=fix_start,
|
242 |
+
input_fps=fps, min_num_frames=min_num_frames, max_num_frames=max_num_frames, local_num_frames=local_num_frames
|
243 |
+
)
|
244 |
+
if clip:
|
245 |
+
frame_indices = [f + start_index for f in frame_indices]
|
246 |
+
|
247 |
+
# print(fps, frame_indices)
|
248 |
+
frames = video_reader.get_batch(frame_indices).asnumpy() # (T, H, W, C), torch.uint8
|
249 |
+
# https://github.com/dmlc/decord/issues/208
|
250 |
+
video_reader.seek(0)
|
251 |
+
|
252 |
+
if byteio is not None:
|
253 |
+
byteio.close()
|
254 |
+
# frames = frames.permute(0, 3, 1, 2) # (T, C, H, W), torch.uint8
|
255 |
+
return frames, frame_indices, float(fps), duration
|
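# --- Added illustrative usage (hedged): calling read_frames_decord on a local
# mp4; the file name and the frame budget below are hypothetical.
def _demo_read_frames_decord(path="example.mp4"):
    frames, indices, fps, duration = read_frames_decord(
        path, num_frames=8, sample="middle", max_num_frames=8)
    # frames is a (T, H, W, C) uint8 array; indices are the sampled frame ids.
    return frames.shape, indices, fps, duration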
256 |
+
|
257 |
+
|
258 |
+
|
259 |
+
def read_frames_img(
|
260 |
+
video_path, num_frames, sample='rand', fix_start=None, min_num_frames=1,
|
261 |
+
max_num_frames=-1, client=None, clip=None, local_num_frames=8
|
262 |
+
):
|
263 |
+
def extract_frame_number(filename):
|
264 |
+
# Extract the numeric part from the filename using regular expressions
|
265 |
+
if filename.endswith('.jpg'):
|
266 |
+
match = re.search(r'_(\d+).jpg$', filename)
|
267 |
+
elif filename.endswith('.jpeg'):
|
268 |
+
match = re.search(r'_(\d+).jpeg$', filename)
|
269 |
+
elif filename.endswith('.png'):
|
270 |
+
match = re.search(r'_(\d+).png$', filename)
|
271 |
+
else:
|
272 |
+
raise NotImplementedError(f"Wrong filename: {filename}")
|
273 |
+
|
274 |
+
return int(match.group(1)) if match else -1
|
275 |
+
|
276 |
+
|
277 |
+
def sort_frames(frame_paths):
|
278 |
+
# Extract filenames from each path and sort by their numeric part
|
279 |
+
return sorted(frame_paths, key=lambda x: extract_frame_number(os.path.basename(x)))
|
280 |
+
|
281 |
+
# img_list=[]
|
282 |
+
|
283 |
+
if "s3://" in video_path:
|
284 |
+
img_list = sort_frames(client.list(video_path))
|
285 |
+
else:
|
286 |
+
img_list = sort_frames(list(os.listdir(video_path)))
|
287 |
+
|
288 |
+
|
289 |
+
if 'tvqa' in video_path.lower():
|
290 |
+
fps = 3.0
|
291 |
+
else:
|
292 |
+
fps = 1.0
|
293 |
+
|
294 |
+
if clip is not None:
|
295 |
+
start = float(clip[0])
|
296 |
+
end = float(clip[1])
|
297 |
+
start = max(0, start)
|
298 |
+
end = min(len(img_list) / fps, end)
|
299 |
+
vlen = (end - start) * fps
|
300 |
+
else:
|
301 |
+
vlen = len(img_list)
|
302 |
+
|
303 |
+
duration = vlen / fps
|
304 |
+
|
305 |
+
if min_num_frames > vlen:
|
306 |
+
if sample == 'dynamic_fps1':
|
307 |
+
min_num_frames = (vlen // local_num_frames) * local_num_frames
|
308 |
+
else:
|
309 |
+
min_num_frames = vlen
|
310 |
+
|
311 |
+
if sample == 'dynamic_fps1':
|
312 |
+
num_segments = int(duration // local_num_frames)
|
313 |
+
if num_segments == 0:
|
314 |
+
num_frames = local_num_frames
|
315 |
+
else:
|
316 |
+
num_frames = local_num_frames * num_segments
|
317 |
+
num_frames = min(num_frames, max_num_frames)
|
318 |
+
num_frames = max(min_num_frames, num_frames)
|
319 |
+
|
320 |
+
num_frames = int(num_frames)
|
321 |
+
if clip is not None:
|
322 |
+
def _get_index_by_time(start_sec, end_sec, num_segments=8, fps=1., max_frame=9999):
|
323 |
+
start_idx = max(1, round(start_sec * fps))
|
324 |
+
end_idx = min(round(end_sec * fps), max_frame)
|
325 |
+
seg_size = float(end_idx - start_idx) / (num_segments - 1)
|
326 |
+
offsets = np.array([start_idx + int(np.round(seg_size * idx)) for idx in range(num_segments)])
|
327 |
+
return offsets
|
328 |
+
|
329 |
+
frame_indices = _get_index_by_time(float(clip[0]), float(clip[1]), num_segments=num_frames, fps=fps, max_frame=len(img_list)-1)
|
330 |
+
else:
|
331 |
+
frame_indices = get_frame_indices(
|
332 |
+
num_frames, vlen, sample=sample, fix_start=fix_start,
|
333 |
+
min_num_frames=min_num_frames,
|
334 |
+
max_num_frames=max_num_frames, local_num_frames=local_num_frames
|
335 |
+
)
|
336 |
+
|
337 |
+
imgs = []
|
338 |
+
for idx in frame_indices:
|
339 |
+
frame_fname = os.path.join(video_path, img_list[idx])
|
340 |
+
if "s3://" in video_path:
|
341 |
+
img_bytes = client.get(frame_fname)
|
342 |
+
else:
|
343 |
+
with open(frame_fname, 'rb') as f:
|
344 |
+
img_bytes = f.read()
|
345 |
+
img_np = np.frombuffer(img_bytes, np.uint8)
|
346 |
+
img = cv2.imdecode(img_np, cv2.IMREAD_COLOR)
|
347 |
+
cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)
|
348 |
+
imgs.append(img)
|
349 |
+
|
350 |
+
frames = np.array(imgs, dtype=np.uint8)
|
351 |
+
|
352 |
+
|
353 |
+
return frames, frame_indices, fps, duration
|
354 |
+
|
355 |
+
|
356 |
+
|
357 |
+
VIDEO_READER_FUNCS = {
|
358 |
+
'av': read_frames_av,
|
359 |
+
'decord': read_frames_decord,
|
360 |
+
'gif': read_frames_gif,
|
361 |
+
'img': read_frames_img,
|
362 |
+
'frame': read_frames_img
|
363 |
+
}
|
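# --- Added illustrative dispatch (hedged): a caller looks up the reader by the
# media type recorded for the sample; the key below is hypothetical.
def _demo_pick_reader(video_read_type="decord"):
    reader_fn = VIDEO_READER_FUNCS[video_read_type]  # e.g. read_frames_decord
    return reader_fn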
364 |
+
|
365 |
+
|
366 |
+
|
367 |
+
def load_video(video_path, max_num_frames=512, media_dict=None):
|
368 |
+
|
369 |
+
if media_dict is None:
|
370 |
+
media_dict = {'video_read_type': 'decord'}
|
371 |
+
|
372 |
+
if type(video_path) != str:
|
373 |
+
assert len(video_path) == 1, video_path
|
374 |
+
video_path = video_path[0]
|
375 |
+
|
376 |
+
if 'start' in media_dict:
|
377 |
+
clip = [media_dict['start'], media_dict['end']]
|
378 |
+
else:
|
379 |
+
clip = None
|
380 |
+
|
381 |
+
if 's3://' in video_path:
|
382 |
+
from petrel_client.client import Client
|
383 |
+
client = Client(conf_path='~/petreloss.conf')
|
384 |
+
else:
|
385 |
+
client = None
|
386 |
+
|
387 |
+
frames, frame_indices, fps, duration = VIDEO_READER_FUNCS[media_dict['video_read_type']](video_path=video_path, num_frames=max_num_frames, sample='dynamic_fps1', fix_start=None, min_num_frames=64, max_num_frames=max_num_frames, client=client, clip=clip, local_num_frames=8)
|
388 |
+
|
389 |
+
sec = [str(round(f / fps, 1)) for f in frame_indices]
|
390 |
+
|
391 |
+
msg = f"\nThe video lasts for {duration:.2f} seconds, and {len(sec)} frames are uniformly sampled from it. "
|
392 |
+
|
393 |
+
return frames, msg
|
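# --- Added illustrative usage (hedged, kept as a comment so nothing runs on
# import); the path below is hypothetical and assumes decord is installed:
#   frames, msg = load_video("demo.mp4", max_num_frames=512,
#                            media_dict={"video_read_type": "decord"})
#   frames is a (T, H, W, C) uint8 array and msg summarises the sampling.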
394 |
+
|
395 |
+
|
396 |
+
######################## load video ########################
|
397 |
+
|
398 |
+
|
399 |
+
def resize_and_center_crop(image, shortest_edge_length):
|
400 |
+
# Calculate new dimensions and resize
|
401 |
+
aspect_ratio = float(image.width) / float(image.height)
|
402 |
+
if aspect_ratio > 1:
|
403 |
+
new_width = int(shortest_edge_length * aspect_ratio)
|
404 |
+
new_height = shortest_edge_length
|
405 |
+
else:
|
406 |
+
new_width = shortest_edge_length
|
407 |
+
new_height = int(shortest_edge_length / aspect_ratio)
|
408 |
+
resized_image = image.resize((new_width, new_height), Image.LANCZOS)  # ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter
|
409 |
+
|
410 |
+
# Calculate the position and perform the center crop
|
411 |
+
left = (new_width - shortest_edge_length) / 2
|
412 |
+
top = (new_height - shortest_edge_length) / 2
|
413 |
+
right = (new_width + shortest_edge_length) / 2
|
414 |
+
bottom = (new_height + shortest_edge_length) / 2
|
415 |
+
cropped_image = resized_image.crop((left, top, right, bottom))
|
416 |
+
|
417 |
+
return cropped_image
|
418 |
+
|
419 |
+
|
420 |
+
def auto_pad_images(image, grid_params):
|
421 |
+
assert isinstance(image, Image.Image), "Input should be a Pillow Image"
|
422 |
+
assert len(grid_params) > 0, "Grid parameters should not be empty"
|
423 |
+
|
424 |
+
# Step 1: Calculate and find the closest aspect ratio
|
425 |
+
input_width, input_height = image.size
|
426 |
+
input_aspect_ratio = input_width / input_height
|
427 |
+
candidate_resolutions = [(w / h, w, h) for w in grid_params for h in grid_params]
|
428 |
+
closest_aspect_ratio = min(candidate_resolutions, key=lambda x: abs(input_aspect_ratio - x[0]))
|
429 |
+
|
430 |
+
candidate_resolutions = [(x[1], x[2]) for x in candidate_resolutions if abs(x[0] - closest_aspect_ratio[0]) < 1e-3]
|
431 |
+
|
432 |
+
target_resolution = min(candidate_resolutions, key=lambda res: abs(max(input_width, input_height) / max(res) - 1))
|
433 |
+
|
434 |
+
resize_width, resize_height = target_resolution
|
435 |
+
if input_width > input_height:
|
436 |
+
resize_height = int(resize_width / input_aspect_ratio)
|
437 |
+
else:
|
438 |
+
resize_width = int(resize_height * input_aspect_ratio)
|
439 |
+
resized_image = image.resize((resize_width, resize_height), Image.LANCZOS)  # ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter
|
440 |
+
|
441 |
+
# Step 5: Pad the resized image if necessary to match the target resolution
|
442 |
+
pad_width = target_resolution[0] - resize_width
|
443 |
+
pad_height = target_resolution[1] - resize_height
|
444 |
+
padded_image = Image.new("RGB", target_resolution, color=(0, 0, 0))
|
445 |
+
padded_image.paste(resized_image, (pad_width // 2, pad_height // 2))
|
446 |
+
|
447 |
+
return padded_image
|
448 |
+
|
449 |
+
|
450 |
+
def extract_patches(image, patch_size, overlap_ratio):
|
451 |
+
assert isinstance(image, Image.Image), "Input should be a Pillow Image"
|
452 |
+
assert patch_size > 0, "Patch size should be greater than 0"
|
453 |
+
assert 0 <= overlap_ratio < 1, "Overlap ratio should be between 0 and 1"
|
454 |
+
|
455 |
+
W, H = image.size
|
456 |
+
patches = []
|
457 |
+
|
458 |
+
stride = int(patch_size * (1 - overlap_ratio))
|
459 |
+
|
460 |
+
num_patches_y = (H - patch_size) // stride + 1
|
461 |
+
num_patches_x = (W - patch_size) // stride + 1
|
462 |
+
|
463 |
+
y_start = (H - (num_patches_y - 1) * stride - patch_size) // 2
|
464 |
+
x_start = (W - (num_patches_x - 1) * stride - patch_size) // 2
|
465 |
+
|
466 |
+
for y in range(y_start, y_start + num_patches_y * stride, stride):
|
467 |
+
for x in range(x_start, x_start + num_patches_x * stride, stride):
|
468 |
+
patch = image.crop((x, y, x + patch_size, y + patch_size))
|
469 |
+
patches.append(patch)
|
470 |
+
|
471 |
+
return patches
|
472 |
+
|
473 |
+
|
474 |
+
def process_highres_image_crop_split(image, data_args, processor=None):
|
475 |
+
crop_resolution = data_args.image_crop_resolution
|
476 |
+
split_resolution = data_args.image_split_resolution
|
477 |
+
if processor is None:
|
478 |
+
processor = data_args.image_processor
|
479 |
+
image_crop = resize_and_center_crop(image, crop_resolution)
|
480 |
+
image_patches = extract_patches(image_crop, patch_size=split_resolution, overlap_ratio=0)
|
481 |
+
image_patches = [processor.preprocess(image_patch, return_tensors="pt")["pixel_values"][0] for image_patch in image_patches]
|
482 |
+
return torch.stack(image_patches, dim=0)
|
483 |
+
|
484 |
+
|
485 |
+
def process_highres_image(image, processor, grid_pinpoints):
|
486 |
+
grid_params = [int(x) for x in grid_pinpoints.split(",")]
|
487 |
+
width_height = max(image.size)
|
488 |
+
fit_grid_params = [x for x in grid_params if x >= width_height]
|
489 |
+
if len(fit_grid_params) == 0:
|
490 |
+
select_size = max(grid_params)
|
491 |
+
else:
|
492 |
+
select_size = min(fit_grid_params)
|
493 |
+
# FIXME: always select the 448
|
494 |
+
select_size = max(grid_params)
|
495 |
+
image_padded = expand2square(image, tuple(int(x * 255) for x in processor.image_mean))
|
496 |
+
|
497 |
+
# FIXME: this seems to be a bug that it always resizes instead of padding
|
498 |
+
image_original_resize = image.resize((processor.size["shortest_edge"], processor.size["shortest_edge"]))
|
499 |
+
image_padded = image_padded.resize((select_size, select_size))
|
500 |
+
image_patches = extract_patches(image_padded, patch_size=processor.size["shortest_edge"], overlap_ratio=0)
|
501 |
+
image_patches = [image_original_resize] + image_patches
|
502 |
+
image_patches = [processor.preprocess(image_patch, return_tensors="pt")["pixel_values"][0] for image_patch in image_patches]
|
503 |
+
return torch.stack(image_patches, dim=0)
|
504 |
+
|
505 |
+
|
506 |
+
def select_best_resolution(original_size, possible_resolutions, max_resolutions, patch_size):
|
507 |
+
"""
|
508 |
+
Selects the best resolution from a list of possible resolutions based on the original size.
|
509 |
+
|
510 |
+
Args:
|
511 |
+
original_size (tuple): The original size of the image in the format (width, height).
|
512 |
+
possible_resolutions (list): A list of possible resolutions in the format [(width1, height1), (width2, height2), ...].
|
513 |
+
|
514 |
+
Returns:
|
515 |
+
tuple: The best fit resolution in the format (width, height).
|
516 |
+
"""
|
517 |
+
original_width, original_height = original_size
|
518 |
+
best_fit = None
|
519 |
+
max_effective_resolution = 0
|
520 |
+
min_wasted_resolution = float("inf")
|
521 |
+
|
522 |
+
for width, height in possible_resolutions:
|
523 |
+
if max_resolutions is not None and (width * height != patch_size * patch_size):
|
524 |
+
if (width * height + patch_size * patch_size) > max_resolutions:  # NOTE: also need to budget for a global patch
|
525 |
+
continue
|
526 |
+
# Calculate the downscaled size to keep the aspect ratio
|
527 |
+
scale = min(width / original_width, height / original_height)
|
528 |
+
downscaled_width, downscaled_height = int(original_width * scale), int(original_height * scale)
|
529 |
+
|
530 |
+
# Calculate effective and wasted resolutions
|
531 |
+
effective_resolution = min(downscaled_width * downscaled_height, original_width * original_height)
|
532 |
+
wasted_resolution = (width * height) - effective_resolution
|
533 |
+
|
534 |
+
if effective_resolution > max_effective_resolution or (effective_resolution == max_effective_resolution and wasted_resolution < min_wasted_resolution):
|
535 |
+
max_effective_resolution = effective_resolution
|
536 |
+
min_wasted_resolution = wasted_resolution
|
537 |
+
best_fit = (width, height)
|
538 |
+
|
539 |
+
# print(f"original_size={original_size}, possible_resolutions={possible_resolutions}, max_resolutions={max_resolutions}, best_fit={best_fit}")
|
540 |
+
assert best_fit is not None, f"Can't find suitable fit in {possible_resolutions} at max:{max_resolutions}"
|
541 |
+
return best_fit
|
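# --- Added worked example (hedged): for a hypothetical 1000x600 image with
# possible_resolutions [(448, 448), (896, 448), (448, 896)] and
# max_resolutions=None, the scale for (896, 448) is min(0.896, 0.747) = 0.747,
# giving a 746x448 downscale and the largest effective resolution, so
# select_best_resolution returns (896, 448).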
542 |
+
|
543 |
+
|
544 |
+
def resize_and_pad_image(image, target_resolution):
|
545 |
+
"""
|
546 |
+
Resize and pad an image to a target resolution while maintaining aspect ratio.
|
547 |
+
|
548 |
+
Args:
|
549 |
+
image (PIL.Image.Image): The input image.
|
550 |
+
target_resolution (tuple): The target resolution (width, height) of the image.
|
551 |
+
|
552 |
+
Returns:
|
553 |
+
PIL.Image.Image: The resized and padded image.
|
554 |
+
"""
|
555 |
+
original_width, original_height = image.size
|
556 |
+
target_width, target_height = target_resolution
|
557 |
+
|
558 |
+
# Determine which dimension (width or height) to fill
|
559 |
+
scale_w = target_width / original_width
|
560 |
+
scale_h = target_height / original_height
|
561 |
+
|
562 |
+
if scale_w < scale_h:
|
563 |
+
# Width will be filled completely
|
564 |
+
new_width = target_width
|
565 |
+
new_height = min(math.ceil(original_height * scale_w), target_height)
|
566 |
+
else:
|
567 |
+
# Height will be filled completely
|
568 |
+
new_height = target_height
|
569 |
+
new_width = min(math.ceil(original_width * scale_h), target_width)
|
570 |
+
|
571 |
+
# Resize the image
|
572 |
+
resized_image = image.resize((new_width, new_height))
|
573 |
+
|
574 |
+
# Create a new image with the target size and paste the resized image onto it
|
575 |
+
new_image = Image.new("RGB", (target_width, target_height), (0, 0, 0))
|
576 |
+
paste_x = (target_width - new_width) // 2
|
577 |
+
paste_y = (target_height - new_height) // 2
|
578 |
+
new_image.paste(resized_image, (paste_x, paste_y))
|
579 |
+
|
580 |
+
return new_image
|
581 |
+
|
582 |
+
|
583 |
+
def divide_to_patches(image, patch_size):
|
584 |
+
"""
|
585 |
+
Divides an image into patches of a specified size.
|
586 |
+
|
587 |
+
Args:
|
588 |
+
image (PIL.Image.Image): The input image.
|
589 |
+
patch_size (int): The size of each patch.
|
590 |
+
|
591 |
+
Returns:
|
592 |
+
list: A list of PIL.Image.Image objects representing the patches.
|
593 |
+
"""
|
594 |
+
patches = []
|
595 |
+
width, height = image.size
|
596 |
+
for i in range(0, height, patch_size):
|
597 |
+
for j in range(0, width, patch_size):
|
598 |
+
box = (j, i, j + patch_size, i + patch_size)
|
599 |
+
patch = image.crop(box)
|
600 |
+
patches.append(patch)
|
601 |
+
|
602 |
+
return patches
|
603 |
+
|
604 |
+
|
605 |
+
def get_anyres_image_grid_shape(image_size, grid_pinpoints, patch_size, max_resolutions=None):
|
606 |
+
"""
|
607 |
+
Calculate the shape of the image patch grid after the preprocessing for images of any resolution.
|
608 |
+
|
609 |
+
Args:
|
610 |
+
image_size (tuple): The size of the input image in the format (width, height).
|
611 |
+
grid_pinpoints (str): A string representation of a list of possible resolutions.
|
612 |
+
patch_size (int): The size of each image patch.
|
613 |
+
|
614 |
+
Returns:
|
615 |
+
tuple: The shape of the image patch grid in the format (width, height).
|
616 |
+
"""
|
617 |
+
if isinstance(grid_pinpoints, str) and "x" in grid_pinpoints:
|
618 |
+
assert patch_size in [224, 336, 384, 448, 512], "patch_size should be in [224, 336, 384, 448, 512]"
|
619 |
+
# Use regex to extract the range from the input string
|
620 |
+
matches = re.findall(r"\((\d+)x(\d+)\)", grid_pinpoints)
|
621 |
+
range_start = tuple(map(int, matches[0]))
|
622 |
+
range_end = tuple(map(int, matches[-1]))
|
623 |
+
# Generate a matrix of tuples from (range_start[0], range_start[1]) to (range_end[0], range_end[1])
|
624 |
+
grid_pinpoints = [(i, j) for i in range(range_start[0], range_end[0] + 1) for j in range(range_start[1], range_end[1] + 1)]
|
625 |
+
# Multiply all elements by patch_size
|
626 |
+
grid_pinpoints = [[dim * patch_size for dim in pair] for pair in grid_pinpoints]
|
627 |
+
if type(grid_pinpoints) is list:
|
628 |
+
possible_resolutions = grid_pinpoints
|
629 |
+
else:
|
630 |
+
possible_resolutions = ast.literal_eval(grid_pinpoints)
|
631 |
+
width, height = select_best_resolution(image_size, possible_resolutions, max_resolutions=max_resolutions, patch_size=patch_size)
|
632 |
+
|
633 |
+
# print("get width/patch size", width, patch_size, flush=True)
|
634 |
+
|
635 |
+
return width // patch_size, height // patch_size
|
636 |
+
|
637 |
+
|
638 |
+
def process_anyres_image(image, processor, grid_pinpoints):
|
639 |
+
"""
|
640 |
+
Process an image with variable resolutions.
|
641 |
+
|
642 |
+
Args:
|
643 |
+
image (PIL.Image.Image): The input image to be processed.
|
644 |
+
processor: The image processor object.
|
645 |
+
grid_pinpoints (str): A string representation of a list of possible resolutions.
|
646 |
+
|
647 |
+
Returns:
|
648 |
+
torch.Tensor: A tensor containing the processed image patches.
|
649 |
+
"""
|
650 |
+
raise NotImplementedError
|
651 |
+
# Convert grid_pinpoints from string to list
|
652 |
+
if isinstance(grid_pinpoints, str) and "x" in grid_pinpoints:
|
653 |
+
try:
|
654 |
+
patch_size = processor.size[0]
|
655 |
+
except Exception as e:
|
656 |
+
patch_size = processor.size["shortest_edge"]
|
657 |
+
assert patch_size in [224, 336, 384, 448, 512], "patch_size should be in [224, 336, 384, 448, 512]"
|
658 |
+
# Use regex to extract the range from the input string
|
659 |
+
matches = re.findall(r"\((\d+)x(\d+)\)", grid_pinpoints)
|
660 |
+
range_start = tuple(map(int, matches[0]))
|
661 |
+
range_end = tuple(map(int, matches[-1]))
|
662 |
+
# Generate a matrix of tuples from (range_start[0], range_start[1]) to (range_end[0], range_end[1])
|
663 |
+
grid_pinpoints = [(i, j) for i in range(range_start[0], range_end[0] + 1) for j in range(range_start[1], range_end[1] + 1)]
|
664 |
+
# Multiply all elements by patch_size
|
665 |
+
grid_pinpoints = [[dim * patch_size for dim in pair] for pair in grid_pinpoints]
|
666 |
+
|
667 |
+
if type(grid_pinpoints) is list:
|
668 |
+
possible_resolutions = grid_pinpoints
|
669 |
+
else:
|
670 |
+
possible_resolutions = ast.literal_eval(grid_pinpoints)
|
671 |
+
best_resolution = select_best_resolution(image.size, possible_resolutions)
|
672 |
+
image_padded = resize_and_pad_image(image, best_resolution)
|
673 |
+
|
674 |
+
patches = divide_to_patches(image_padded, processor.crop_size["height"])
|
675 |
+
|
676 |
+
# FIXME: this seems to be a bug that it resizes instead of pad.
|
677 |
+
# but to keep it consistent with previous, i will keep it as it is
|
678 |
+
# TODO: uncomment below to ablate with the padding
|
679 |
+
if isinstance(processor.size, dict):
|
680 |
+
shortest_edge = processor.size["shortest_edge"]
|
681 |
+
else:
|
682 |
+
shortest_edge = min(processor.size)
|
683 |
+
image_original_resize = image.resize((shortest_edge, shortest_edge))
|
684 |
+
# image_padded_square = expand2square(image, tuple(int(x*255) for x in processor.image_mean))
|
685 |
+
# image_original_resize = image_padded_square.resize((processor.size['shortest_edge'], processor.size['shortest_edge']))
|
686 |
+
|
687 |
+
image_patches = [image_original_resize] + patches
|
688 |
+
image_patches = [processor.preprocess(image_patch, return_tensors="pt")["pixel_values"][0] for image_patch in image_patches]
|
689 |
+
|
690 |
+
# print("image.size", image.size, "len(image_patches):", len(image_patches), "patch_size:", image_patches[0].shape)
|
691 |
+
return torch.stack(image_patches, dim=0)
|
692 |
+
|
693 |
+
def process_anyres_image_nopad(image, processor, grid_pinpoints):
|
694 |
+
"""
|
695 |
+
Process an image with variable resolutions.
|
696 |
+
|
697 |
+
Args:
|
698 |
+
image (PIL.Image.Image): The input image to be processed.
|
699 |
+
processor: The image processor object.
|
700 |
+
grid_pinpoints (str): A string representation of a list of possible resolutions.
|
701 |
+
|
702 |
+
Returns:
|
703 |
+
torch.Tensor: A tensor containing the processed image patches.
|
704 |
+
"""
|
705 |
+
# Convert grid_pinpoints from string to list
|
706 |
+
try:
|
707 |
+
patch_size = processor.size[0]
|
708 |
+
except Exception as e:
|
709 |
+
patch_size = processor.size["shortest_edge"]
|
710 |
+
|
711 |
+
assert patch_size in [224, 336, 384, 448, 512], "patch_size should be in [224, 336, 384, 448, 512]"
|
712 |
+
|
713 |
+
if isinstance(grid_pinpoints, str) and "x" in grid_pinpoints:
|
714 |
+
|
715 |
+
# Use regex to extract the range from the input string
|
716 |
+
matches = re.findall(r"\((\d+)x(\d+)\)", grid_pinpoints)
|
717 |
+
range_start = tuple(map(int, matches[0]))
|
718 |
+
range_end = tuple(map(int, matches[-1]))
|
719 |
+
# Generate a matrix of tuples from (range_start[0], range_start[1]) to (range_end[0], range_end[1])
|
720 |
+
grid_pinpoints = [(i, j) for i in range(range_start[0], range_end[0] + 1) for j in range(range_start[1], range_end[1] + 1)]
|
721 |
+
# Multiply all elements by patch_size
|
722 |
+
grid_pinpoints = [[dim * patch_size for dim in pair] for pair in grid_pinpoints]
|
723 |
+
|
724 |
+
if type(grid_pinpoints) is list:
|
725 |
+
possible_resolutions = grid_pinpoints
|
726 |
+
else:
|
727 |
+
possible_resolutions = ast.literal_eval(grid_pinpoints)
|
728 |
+
best_resolution = select_best_resolution(image.size, possible_resolutions, max_resolutions=None, patch_size=patch_size)  # currently no resolution limit for images
|
729 |
+
# image_padded = resize_and_pad_image(image, best_resolution)
|
730 |
+
|
731 |
+
patches = divide_to_patches(image.resize(best_resolution), patch_size)
|
732 |
+
|
733 |
+
# FIXME: this seems to be a bug that it resizes instead of pad.
|
734 |
+
# but to keep it consistent with previous, i will keep it as it is
|
735 |
+
# TODO: uncomment below to ablate with the padding
|
736 |
+
if isinstance(processor.size, dict):
|
737 |
+
shortest_edge = processor.size["shortest_edge"]
|
738 |
+
else:
|
739 |
+
shortest_edge = min(processor.size)
|
740 |
+
image_original_resize = image.resize((shortest_edge, shortest_edge))
|
741 |
+
# image_padded_square = expand2square(image, tuple(int(x*255) for x in processor.image_mean))
|
742 |
+
# image_original_resize = image_padded_square.resize((processor.size['shortest_edge'], processor.size['shortest_edge']))
|
743 |
+
|
744 |
+
image_patches = [image_original_resize] + patches
|
745 |
+
image_patches = [processor.preprocess(image_patch, return_tensors="pt")["pixel_values"][0] for image_patch in image_patches]
|
746 |
+
|
747 |
+
# raise ValueError(f"image.size: {image.size} len(image_patches): {len(image_patches)}, patch_size:, {image_patches[0].shape}, possible_resolutions:, {possible_resolutions}, best: {best_resolution}")
|
748 |
+
return torch.stack(image_patches, dim=0)
|
749 |
+
|
750 |
+
|
751 |
+
def load_image_from_base64(image):
|
752 |
+
return Image.open(BytesIO(base64.b64decode(image)))
|
753 |
+
|
754 |
+
|
755 |
+
def expand2square(pil_img, background_color):
|
756 |
+
width, height = pil_img.size
|
757 |
+
if width == height:
|
758 |
+
return pil_img
|
759 |
+
elif width > height:
|
760 |
+
result = Image.new(pil_img.mode, (width, width), background_color)
|
761 |
+
result.paste(pil_img, (0, (width - height) // 2))
|
762 |
+
return result
|
763 |
+
else:
|
764 |
+
result = Image.new(pil_img.mode, (height, height), background_color)
|
765 |
+
result.paste(pil_img, ((height - width) // 2, 0))
|
766 |
+
return result
|
767 |
+
|
768 |
+
|
769 |
+
def process_images(images, image_processor, model_cfg):
|
770 |
+
image_aspect_ratio = getattr(model_cfg, "image_aspect_ratio", None)
|
771 |
+
new_images = []
|
772 |
+
if image_aspect_ratio == "highres":
|
773 |
+
raise NotImplementedError
|
774 |
+
for image in images:
|
775 |
+
image = process_highres_image(image, image_processor, model_cfg.image_grid_pinpoints)
|
776 |
+
new_images.append(image)
|
777 |
+
elif "anyres" in image_aspect_ratio:
|
778 |
+
for image in images:
|
779 |
+
if "nopad" in image_aspect_ratio:
|
780 |
+
image = process_anyres_image_nopad(image, image_processor, model_cfg.image_grid_pinpoints)
|
781 |
+
else:
|
782 |
+
image = process_anyres_image(image, image_processor, model_cfg.image_grid_pinpoints)
|
783 |
+
new_images.append(image)
|
784 |
+
elif image_aspect_ratio == "crop_split":
|
785 |
+
raise NotImplementedError
|
786 |
+
for image in images:
|
787 |
+
image = process_highres_image_crop_split(image, model_cfg, image_processor)
|
788 |
+
new_images.append(image)
|
789 |
+
elif image_aspect_ratio == "pad":
|
790 |
+
for image in images:
|
791 |
+
image = expand2square(image, tuple(int(x * 255) for x in image_processor.image_mean))
|
792 |
+
image = image_processor.preprocess(image, return_tensors="pt")["pixel_values"][0]
|
793 |
+
new_images.append(image)
|
794 |
+
else:
|
795 |
+
return image_processor.preprocess(images, return_tensors="pt")["pixel_values"]
|
796 |
+
if all(x.shape == new_images[0].shape for x in new_images):
|
797 |
+
new_images = torch.stack(new_images, dim=0)
|
798 |
+
return new_images
|
799 |
+
|
800 |
+
|
801 |
+
def tokenizer_image_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None):
|
802 |
+
prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split("<image>")]
|
803 |
+
|
804 |
+
def insert_separator(X, sep):
|
805 |
+
return [ele for sublist in zip(X, [sep] * len(X)) for ele in sublist][:-1]
|
806 |
+
|
807 |
+
input_ids = []
|
808 |
+
offset = 0
|
809 |
+
if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id:
|
810 |
+
offset = 1
|
811 |
+
input_ids.append(prompt_chunks[0][0])
|
812 |
+
|
813 |
+
for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):
|
814 |
+
input_ids.extend(x[offset:])
|
815 |
+
|
816 |
+
if return_tensors is not None:
|
817 |
+
if return_tensors == "pt":
|
818 |
+
return torch.tensor(input_ids, dtype=torch.long)
|
819 |
+
raise ValueError(f"Unsupported tensor type: {return_tensors}")
|
820 |
+
return input_ids
|
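# --- Added illustrative usage (hedged; the prompt text is hypothetical):
#   ids = tokenizer_image_token("Describe the video.\n<image>", tokenizer, return_tensors="pt")
# Each "<image>" placeholder in the prompt is replaced by a single
# IMAGE_TOKEN_INDEX sentinel (a negative id in the LLaVA convention) inside the
# otherwise ordinary token id sequence.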
821 |
+
|
822 |
+
|
823 |
+
def get_model_name_from_path(model_path):
|
824 |
+
model_path = model_path.strip("/")
|
825 |
+
model_paths = model_path.split("/")
|
826 |
+
if model_paths[-1].startswith("checkpoint-"):
|
827 |
+
return model_paths[-2] + "_" + model_paths[-1]
|
828 |
+
else:
|
829 |
+
return model_paths[-1]
|
830 |
+
|
831 |
+
|
832 |
+
class KeywordsStoppingCriteria(StoppingCriteria):
|
833 |
+
def __init__(self, keywords, tokenizer, input_ids):
|
834 |
+
self.keywords = keywords
|
835 |
+
self.keyword_ids = []
|
836 |
+
for keyword in keywords:
|
837 |
+
cur_keyword_ids = tokenizer(keyword).input_ids
|
838 |
+
if len(cur_keyword_ids) > 1 and cur_keyword_ids[0] == tokenizer.bos_token_id:
|
839 |
+
cur_keyword_ids = cur_keyword_ids[1:]
|
840 |
+
self.keyword_ids.append(torch.tensor(cur_keyword_ids))
|
841 |
+
self.tokenizer = tokenizer
|
842 |
+
self.start_len = input_ids.shape[1]
|
843 |
+
|
844 |
+
def __call__(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
|
845 |
+
assert output_ids.shape[0] == 1, "Only support batch size 1 (yet)" # TODO
|
846 |
+
offset = min(output_ids.shape[1] - self.start_len, 3)
|
847 |
+
self.keyword_ids = [keyword_id.to(output_ids.device) for keyword_id in self.keyword_ids]
|
848 |
+
for keyword_id in self.keyword_ids:
|
849 |
+
if (output_ids[0, -keyword_id.shape[0]:] == keyword_id).all():
|
850 |
+
return True
|
851 |
+
outputs = self.tokenizer.batch_decode(output_ids[:, -offset:], skip_special_tokens=True)[0]
|
852 |
+
for keyword in self.keywords:
|
853 |
+
if keyword in outputs:
|
854 |
+
return True
|
855 |
+
return False
|
model.safetensors
ADDED
@@ -0,0 +1,3 @@
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:8d7599e40867eaac136fffa154b070c71aa22bc73b3c1ceece1dcb094f70b475
|
3 |
+
size 4143085560
|
modeling_qwen2_flash.py
ADDED
@@ -0,0 +1,1545 @@
1 |
+
# coding=utf-8
|
2 |
+
# transformers==4.39.2 NOTE
|
3 |
+
# Borrows some implementations from https://github.com/Cooperx521/PyramidDrop, thanks!
|
4 |
+
# Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
|
5 |
+
#
|
6 |
+
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
|
7 |
+
# and OPT implementations in this library. It has been modified from its
|
8 |
+
# original forms to accommodate minor architectural differences compared
|
9 |
+
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
|
10 |
+
#
|
11 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
12 |
+
# you may not use this file except in compliance with the License.
|
13 |
+
# You may obtain a copy of the License at
|
14 |
+
#
|
15 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
16 |
+
#
|
17 |
+
# Unless required by applicable law or agreed to in writing, software
|
18 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
19 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
20 |
+
# See the License for the specific language governing permissions and
|
21 |
+
# limitations under the License.
|
22 |
+
""" PyTorch Qwen2 model."""
|
23 |
+
import inspect
|
24 |
+
import math
|
25 |
+
import warnings
|
26 |
+
from typing import List, Optional, Tuple, Union
|
27 |
+
|
28 |
+
import torch
|
29 |
+
import torch.nn.functional as F
|
30 |
+
import torch.utils.checkpoint
|
31 |
+
from torch import nn
|
32 |
+
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
|
33 |
+
|
34 |
+
from transformers.activations import ACT2FN
|
35 |
+
from transformers.cache_utils import Cache, DynamicCache
|
36 |
+
from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa
|
37 |
+
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
|
38 |
+
from transformers.modeling_utils import PreTrainedModel
|
39 |
+
from transformers.utils import (
|
40 |
+
add_start_docstrings,
|
41 |
+
add_start_docstrings_to_model_forward,
|
42 |
+
is_flash_attn_2_available,
|
43 |
+
is_flash_attn_greater_or_equal_2_10,
|
44 |
+
logging,
|
45 |
+
replace_return_docstrings,
|
46 |
+
)
|
47 |
+
from transformers.models.qwen2.configuration_qwen2 import Qwen2Config
|
48 |
+
from .constants import IGNORE_INDEX
|
49 |
+
|
50 |
+
|
51 |
+
if is_flash_attn_2_available():
|
52 |
+
from flash_attn import flash_attn_func, flash_attn_varlen_func
|
53 |
+
from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
|
54 |
+
|
55 |
+
_flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters)
|
56 |
+
|
57 |
+
|
58 |
+
logger = logging.get_logger(__name__)
|
59 |
+
|
60 |
+
|
61 |
+
_CHECKPOINT_FOR_DOC = "Qwen/Qwen2-7B-beta"
|
62 |
+
_CONFIG_FOR_DOC = "Qwen2Config"
|
63 |
+
|
64 |
+
QWEN2_PRETRAINED_MODEL_ARCHIVE_LIST = [
|
65 |
+
"Qwen/Qwen2-7B-beta",
|
66 |
+
# See all Qwen2 models at https://huggingface.co/models?filter=qwen2
|
67 |
+
]
|
68 |
+
|
69 |
+
|
70 |
+
# Copied from transformers.models.llama.modeling_llama._get_unpad_data
|
71 |
+
def _get_unpad_data(attention_mask):
|
72 |
+
seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
|
73 |
+
indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
|
74 |
+
max_seqlen_in_batch = seqlens_in_batch.max().item()
|
75 |
+
cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
|
76 |
+
return (
|
77 |
+
indices,
|
78 |
+
cu_seqlens,
|
79 |
+
max_seqlen_in_batch,
|
80 |
+
)
|
81 |
+
|
82 |
+
|
83 |
+
# Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Qwen2
|
84 |
+
class Qwen2RMSNorm(nn.Module):
|
85 |
+
def __init__(self, hidden_size, eps=1e-6):
|
86 |
+
"""
|
87 |
+
Qwen2RMSNorm is equivalent to T5LayerNorm
|
88 |
+
"""
|
89 |
+
super().__init__()
|
90 |
+
self.weight = nn.Parameter(torch.ones(hidden_size))
|
91 |
+
self.variance_epsilon = eps
|
92 |
+
|
93 |
+
def forward(self, hidden_states):
|
94 |
+
input_dtype = hidden_states.dtype
|
95 |
+
hidden_states = hidden_states.to(torch.float32)
|
96 |
+
variance = hidden_states.pow(2).mean(-1, keepdim=True)
|
97 |
+
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
|
98 |
+
return self.weight * hidden_states.to(input_dtype)
|
99 |
+
|
100 |
+
|
101 |
+
# Copied from transformers.models.mistral.modeling_mistral.MistralRotaryEmbedding with Mistral->Qwen2
|
102 |
+
class Qwen2RotaryEmbedding(nn.Module):
|
103 |
+
def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
|
104 |
+
super().__init__()
|
105 |
+
|
106 |
+
self.dim = dim
|
107 |
+
self.max_position_embeddings = max_position_embeddings
|
108 |
+
self.base = base
|
109 |
+
inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
|
110 |
+
self.register_buffer("inv_freq", inv_freq, persistent=False)
|
111 |
+
|
112 |
+
# Build here to make `torch.jit.trace` work.
|
113 |
+
self._set_cos_sin_cache(
|
114 |
+
seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
|
115 |
+
)
|
116 |
+
|
117 |
+
def _set_cos_sin_cache(self, seq_len, device, dtype):
|
118 |
+
self.max_seq_len_cached = seq_len
|
119 |
+
t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
|
120 |
+
|
121 |
+
freqs = torch.outer(t, self.inv_freq)
|
122 |
+
# Different from paper, but it uses a different permutation in order to obtain the same calculation
|
123 |
+
emb = torch.cat((freqs, freqs), dim=-1)
|
124 |
+
self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
|
125 |
+
self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
|
126 |
+
|
127 |
+
def forward(self, x, seq_len=None):
|
128 |
+
# x: [bs, num_attention_heads, seq_len, head_size]
|
129 |
+
if seq_len > self.max_seq_len_cached:
|
130 |
+
self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
|
131 |
+
|
132 |
+
return (
|
133 |
+
self.cos_cached[:seq_len].to(dtype=x.dtype),
|
134 |
+
self.sin_cached[:seq_len].to(dtype=x.dtype),
|
135 |
+
)
|
136 |
+
|
137 |
+
|
138 |
+
# Copied from transformers.models.llama.modeling_llama.rotate_half
|
139 |
+
def rotate_half(x):
|
140 |
+
"""Rotates half the hidden dims of the input."""
|
141 |
+
x1 = x[..., : x.shape[-1] // 2]
|
142 |
+
x2 = x[..., x.shape[-1] // 2 :]
|
143 |
+
return torch.cat((-x2, x1), dim=-1)
|
144 |
+
|
145 |
+
|
146 |
+
# Copied from transformers.models.mistral.modeling_mistral.apply_rotary_pos_emb
|
147 |
+
def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
|
148 |
+
"""Applies Rotary Position Embedding to the query and key tensors.
|
149 |
+
|
150 |
+
Args:
|
151 |
+
q (`torch.Tensor`): The query tensor.
|
152 |
+
k (`torch.Tensor`): The key tensor.
|
153 |
+
cos (`torch.Tensor`): The cosine part of the rotary embedding.
|
154 |
+
sin (`torch.Tensor`): The sine part of the rotary embedding.
|
155 |
+
position_ids (`torch.Tensor`):
|
156 |
+
The position indices of the tokens corresponding to the query and key tensors. For example, this can be
|
157 |
+
used to pass offsetted position ids when working with a KV-cache.
|
158 |
+
unsqueeze_dim (`int`, *optional*, defaults to 1):
|
159 |
+
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
|
160 |
+
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
|
161 |
+
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
|
162 |
+
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
|
163 |
+
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
|
164 |
+
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
|
165 |
+
Returns:
|
166 |
+
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
|
167 |
+
"""
|
168 |
+
cos = cos[position_ids].unsqueeze(unsqueeze_dim)
|
169 |
+
sin = sin[position_ids].unsqueeze(unsqueeze_dim)
|
170 |
+
q_embed = (q * cos) + (rotate_half(q) * sin)
|
171 |
+
k_embed = (k * cos) + (rotate_half(k) * sin)
|
172 |
+
return q_embed, k_embed
|
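# --- Added illustrative shape check (hedged; all sizes below are hypothetical):
def _demo_rope_shapes():
    bsz, heads, seq, dim = 1, 2, 8, 16
    q = torch.zeros(bsz, heads, seq, dim)
    k = torch.zeros(bsz, heads, seq, dim)
    rope = Qwen2RotaryEmbedding(dim, max_position_embeddings=32)
    cos, sin = rope(q, seq_len=seq)                # each (seq, dim)
    position_ids = torch.arange(seq).unsqueeze(0)  # (1, seq)
    q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin, position_ids)
    return q_rot.shape, k_rot.shape                # both stay (1, 2, 8, 16)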
173 |
+
|
174 |
+
|
175 |
+
# Copied from transformers.models.mistral.modeling_mistral.MistralMLP with Mistral->Qwen2
|
176 |
+
class Qwen2MLP(nn.Module):
|
177 |
+
def __init__(self, config):
|
178 |
+
super().__init__()
|
179 |
+
self.config = config
|
180 |
+
self.hidden_size = config.hidden_size
|
181 |
+
self.intermediate_size = config.intermediate_size
|
182 |
+
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
|
183 |
+
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
|
184 |
+
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
|
185 |
+
self.act_fn = ACT2FN[config.hidden_act]
|
186 |
+
|
187 |
+
def forward(self, x):
|
188 |
+
return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
|
189 |
+
|
190 |
+
|
191 |
+
# Copied from transformers.models.llama.modeling_llama.repeat_kv
|
192 |
+
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
|
193 |
+
"""
|
194 |
+
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
|
195 |
+
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
|
196 |
+
"""
|
197 |
+
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
|
198 |
+
if n_rep == 1:
|
199 |
+
return hidden_states
|
200 |
+
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
|
201 |
+
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
|
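# --- Added illustrative shape check (hedged; sizes are hypothetical):
def _demo_repeat_kv():
    kv = torch.zeros(1, 4, 8, 64)      # (batch, num_key_value_heads, seq_len, head_dim)
    expanded = repeat_kv(kv, n_rep=3)  # -> (1, 12, 8, 64): each KV head repeated 3 times
    return expanded.shape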
202 |
+
|
203 |
+
|
204 |
+
class Qwen2Attention(nn.Module):
|
205 |
+
"""
|
206 |
+
Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer
|
207 |
+
and "Generating Long Sequences with Sparse Transformers".
|
208 |
+
"""
|
209 |
+
|
210 |
+
def __init__(self, config: Qwen2Config, layer_idx: Optional[int] = None):
|
211 |
+
super().__init__()
|
212 |
+
self.config = config
|
213 |
+
self.layer_idx = layer_idx
|
214 |
+
if layer_idx is None:
|
215 |
+
logger.warning_once(
|
216 |
+
f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will "
|
217 |
+
"to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
|
218 |
+
"when creating this class."
|
219 |
+
)
|
220 |
+
|
221 |
+
self.hidden_size = config.hidden_size
|
222 |
+
self.num_heads = config.num_attention_heads
|
223 |
+
self.head_dim = self.hidden_size // self.num_heads
|
224 |
+
self.num_key_value_heads = config.num_key_value_heads
|
225 |
+
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
|
226 |
+
self.max_position_embeddings = config.max_position_embeddings
|
227 |
+
self.rope_theta = config.rope_theta
|
228 |
+
self.is_causal = True
|
229 |
+
self.attention_dropout = config.attention_dropout
|
230 |
+
|
231 |
+
if (self.head_dim * self.num_heads) != self.hidden_size:
|
232 |
+
raise ValueError(
|
233 |
+
f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
|
234 |
+
f" and `num_heads`: {self.num_heads})."
|
235 |
+
)
|
236 |
+
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=True)
|
237 |
+
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
|
238 |
+
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
|
239 |
+
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
|
240 |
+
|
241 |
+
self.rotary_emb = Qwen2RotaryEmbedding(
|
242 |
+
self.head_dim,
|
243 |
+
max_position_embeddings=self.max_position_embeddings,
|
244 |
+
base=self.rope_theta,
|
245 |
+
)
|
246 |
+
|
247 |
+
def forward(
|
248 |
+
self,
|
249 |
+
hidden_states: torch.Tensor,
|
250 |
+
attention_mask: Optional[torch.Tensor] = None,
|
251 |
+
position_ids: Optional[torch.LongTensor] = None,
|
252 |
+
past_key_value: Optional[Cache] = None,
|
253 |
+
output_attentions: bool = False,
|
254 |
+
use_cache: bool = False,
|
255 |
+
**kwargs,
|
256 |
+
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
|
257 |
+
if "padding_mask" in kwargs:
|
258 |
+
warnings.warn(
|
259 |
+
"Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
|
260 |
+
)
|
261 |
+
bsz, q_len, _ = hidden_states.size()
|
262 |
+
|
263 |
+
query_states = self.q_proj(hidden_states)
|
264 |
+
key_states = self.k_proj(hidden_states)
|
265 |
+
value_states = self.v_proj(hidden_states)
|
266 |
+
|
267 |
+
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
|
268 |
+
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
269 |
+
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
270 |
+
|
271 |
+
kv_seq_len = key_states.shape[-2]
|
272 |
+
if past_key_value is not None:
|
273 |
+
if self.layer_idx is None:
|
274 |
+
raise ValueError(
|
275 |
+
f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
|
276 |
+
"for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
|
277 |
+
"with a layer index."
|
278 |
+
)
|
279 |
+
kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
|
280 |
+
cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
|
281 |
+
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
|
282 |
+
|
283 |
+
if past_key_value is not None:
|
284 |
+
cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
|
285 |
+
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
|
286 |
+
|
287 |
+
# repeat k/v heads if n_kv_heads < n_heads
|
288 |
+
key_states = repeat_kv(key_states, self.num_key_value_groups)
|
289 |
+
value_states = repeat_kv(value_states, self.num_key_value_groups)
|
290 |
+
|
291 |
+
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
|
292 |
+
|
293 |
+
if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
|
294 |
+
raise ValueError(
|
295 |
+
f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
|
296 |
+
f" {attn_weights.size()}"
|
297 |
+
)
|
298 |
+
|
299 |
+
if attention_mask is not None:
|
300 |
+
if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
|
301 |
+
raise ValueError(
|
302 |
+
f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
|
303 |
+
)
|
304 |
+
|
305 |
+
attn_weights = attn_weights + attention_mask
|
306 |
+
|
307 |
+
# upcast attention to fp32
|
308 |
+
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
|
309 |
+
attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
|
310 |
+
attn_output = torch.matmul(attn_weights, value_states)
|
311 |
+
|
312 |
+
if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
|
313 |
+
raise ValueError(
|
314 |
+
f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
|
315 |
+
f" {attn_output.size()}"
|
316 |
+
)
|
317 |
+
|
318 |
+
attn_output = attn_output.transpose(1, 2).contiguous()
|
319 |
+
attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
|
320 |
+
|
321 |
+
attn_output = self.o_proj(attn_output)
|
322 |
+
|
323 |
+
if not output_attentions:
|
324 |
+
attn_weights = None
|
325 |
+
|
326 |
+
return attn_output, attn_weights, past_key_value
|
327 |
+
|
328 |
+
|
329 |
+
class Qwen2FlashAttention2(Qwen2Attention):
|
330 |
+
"""
|
331 |
+
Qwen2 flash attention module, following Qwen2 attention module. This module inherits from `Qwen2Attention`
|
332 |
+
as the weights of the module stays untouched. The only required change would be on the forward pass
|
333 |
+
where it needs to correctly call the public API of flash attention and deal with padding tokens
|
334 |
+
in case the input contains any of them. Additionally, for sliding window attention, we apply SWA only to the bottom
|
335 |
+
config.max_window_layers layers.
|
336 |
+
"""
|
337 |
+
|
338 |
+
# Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
|
339 |
+
def __init__(self, *args, **kwargs):
|
340 |
+
super().__init__(*args, **kwargs)
|
341 |
+
|
342 |
+
# TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
|
343 |
+
# flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
|
344 |
+
# Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
|
345 |
+
self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
|
346 |
+
|
347 |
+
def forward(
|
348 |
+
self,
|
349 |
+
hidden_states: torch.Tensor,
|
350 |
+
attention_mask: Optional[torch.Tensor] = None,
|
351 |
+
position_ids: Optional[torch.LongTensor] = None,
|
352 |
+
past_key_value: Optional[Cache] = None,
|
353 |
+
output_attentions: bool = False,
|
354 |
+
use_cache: bool = False,
|
355 |
+
**kwargs,
|
356 |
+
):
|
357 |
+
if "padding_mask" in kwargs:
|
358 |
+
warnings.warn(
|
359 |
+
"Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
|
360 |
+
)
|
361 |
+
|
362 |
+
# overwrite attention_mask with padding_mask
|
363 |
+
attention_mask = kwargs.pop("padding_mask")
|
364 |
+
bsz, q_len, _ = hidden_states.size()
|
365 |
+
|
366 |
+
query_states = self.q_proj(hidden_states)
|
367 |
+
key_states = self.k_proj(hidden_states)
|
368 |
+
value_states = self.v_proj(hidden_states)
|
369 |
+
|
370 |
+
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
|
371 |
+
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
372 |
+
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
373 |
+
|
374 |
+
kv_seq_len = key_states.shape[-2]
|
375 |
+
if past_key_value is not None:
|
376 |
+
if self.layer_idx is None:
|
377 |
+
raise ValueError(
|
378 |
+
f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
|
379 |
+
"for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
|
380 |
+
"with a layer index."
|
381 |
+
)
|
382 |
+
kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
|
383 |
+
|
384 |
+
# Because the input can be padded, the absolute sequence length depends on the max position id.
|
385 |
+
rotary_seq_len = max(kv_seq_len, position_ids[:, -1].max().item()) + 1
|
386 |
+
cos, sin = self.rotary_emb(value_states, seq_len=rotary_seq_len)
|
387 |
+
|
388 |
+
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
|
389 |
+
|
390 |
+
use_sliding_windows = (
|
391 |
+
_flash_supports_window_size
|
392 |
+
and getattr(self.config, "sliding_window", None) is not None
|
393 |
+
and kv_seq_len > self.config.sliding_window
|
394 |
+
and self.config.use_sliding_window
|
395 |
+
)
|
396 |
+
|
397 |
+
if not _flash_supports_window_size:
|
398 |
+
logger.warning_once(
|
399 |
+
"The current flash attention version does not support sliding window attention, for a more memory efficient implementation"
|
400 |
+
" make sure to upgrade flash-attn library."
|
401 |
+
)
|
402 |
+
|
403 |
+
if past_key_value is not None:
|
404 |
+
# Only slice the cache if the config has a `sliding_window` attribute
|
405 |
+
cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0
|
406 |
+
if (
|
407 |
+
getattr(self.config, "sliding_window", None) is not None
|
408 |
+
and kv_seq_len > self.config.sliding_window
|
409 |
+
and cache_has_contents
|
410 |
+
):
|
411 |
+
slicing_tokens = 1 - self.config.sliding_window
|
412 |
+
|
413 |
+
past_key = past_key_value[self.layer_idx][0]
|
414 |
+
past_value = past_key_value[self.layer_idx][1]
|
415 |
+
|
416 |
+
past_key = past_key[:, :, slicing_tokens:, :].contiguous()
|
417 |
+
past_value = past_value[:, :, slicing_tokens:, :].contiguous()
|
418 |
+
|
419 |
+
if past_key.shape[-2] != self.config.sliding_window - 1:
|
420 |
+
raise ValueError(
|
421 |
+
f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got"
|
422 |
+
f" {past_key.shape}"
|
423 |
+
)
|
424 |
+
|
425 |
+
if attention_mask is not None:
|
426 |
+
attention_mask = attention_mask[:, slicing_tokens:]
|
427 |
+
attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1)
|
428 |
+
|
429 |
+
cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
|
430 |
+
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
|
431 |
+
|
432 |
+
# repeat k/v heads if n_kv_heads < n_heads
|
433 |
+
key_states = repeat_kv(key_states, self.num_key_value_groups)
|
434 |
+
value_states = repeat_kv(value_states, self.num_key_value_groups)
|
435 |
+
dropout_rate = 0.0 if not self.training else self.attention_dropout
|
436 |
+
|
437 |
+
# In PEFT, usually we cast the layer norms in float32 for training stability reasons
|
438 |
+
# therefore the input hidden states gets silently casted in float32. Hence, we need
|
439 |
+
# cast them back in float16 just to be sure everything works as expected.
|
440 |
+
input_dtype = query_states.dtype
|
441 |
+
if input_dtype == torch.float32:
|
442 |
+
if torch.is_autocast_enabled():
|
443 |
+
target_dtype = torch.get_autocast_gpu_dtype()
|
444 |
+
# Handle the case where the model is quantized
|
445 |
+
elif hasattr(self.config, "_pre_quantization_dtype"):
|
446 |
+
target_dtype = self.config._pre_quantization_dtype
|
447 |
+
else:
|
448 |
+
target_dtype = self.q_proj.weight.dtype
|
449 |
+
|
450 |
+
logger.warning_once(
|
451 |
+
f"The input hidden states seems to be silently casted in float32, this might be related to"
|
452 |
+
f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
|
453 |
+
f" {target_dtype}."
|
454 |
+
)
|
455 |
+
|
456 |
+
query_states = query_states.to(target_dtype)
|
457 |
+
key_states = key_states.to(target_dtype)
|
458 |
+
value_states = value_states.to(target_dtype)
|
459 |
+
|
460 |
+
# Reshape to the expected shape for Flash Attention
|
461 |
+
query_states = query_states.transpose(1, 2)
|
462 |
+
key_states = key_states.transpose(1, 2)
|
463 |
+
value_states = value_states.transpose(1, 2)
|
464 |
+
|
465 |
+
attn_output = self._flash_attention_forward(
|
466 |
+
query_states,
|
467 |
+
key_states,
|
468 |
+
value_states,
|
469 |
+
attention_mask,
|
470 |
+
q_len,
|
471 |
+
dropout=dropout_rate,
|
472 |
+
use_sliding_windows=use_sliding_windows,
|
473 |
+
)
|
474 |
+
|
475 |
+
attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
|
476 |
+
attn_output = self.o_proj(attn_output)
|
477 |
+
|
478 |
+
if not output_attentions:
|
479 |
+
attn_weights = None
|
480 |
+
|
481 |
+
return attn_output, attn_weights, past_key_value
|
482 |
+
|
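# --- Illustrative sketch (not part of the original file) ---
# The forward above transposes q/k/v from (bsz, heads, seq, head_dim) to (bsz, seq, heads,
# head_dim) before calling flash attention, because the flash_attn kernels expect the sequence
# dimension before the head dimension. A minimal sketch of that call, assuming the optional
# flash-attn package (>= 2.1) is installed and the tensors are fp16/bf16 on GPU:
import torch

def flash_attention_layout_demo(q: torch.Tensor, k: torch.Tensor, v: torch.Tensor) -> torch.Tensor:
    # inputs: (bsz, num_heads, seq_len, head_dim)
    from flash_attn import flash_attn_func  # assumption: flash-attn is available
    q, k, v = (t.transpose(1, 2) for t in (q, k, v))  # -> (bsz, seq_len, num_heads, head_dim)
    out = flash_attn_func(q, k, v, dropout_p=0.0, softmax_scale=None, causal=True)
    return out.transpose(1, 2)  # back to (bsz, num_heads, seq_len, head_dim)
# --- end sketch ---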
483 |
+
def _flash_attention_forward(
|
484 |
+
self,
|
485 |
+
query_states,
|
486 |
+
key_states,
|
487 |
+
value_states,
|
488 |
+
attention_mask,
|
489 |
+
query_length,
|
490 |
+
dropout=0.0,
|
491 |
+
softmax_scale=None,
|
492 |
+
use_sliding_windows=False,
|
493 |
+
):
|
494 |
+
"""
|
495 |
+
Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
|
496 |
+
it first unpads the input, then computes the attention scores and pads the final attention scores.
|
497 |
+
|
498 |
+
Args:
|
499 |
+
query_states (`torch.Tensor`):
|
500 |
+
Input query states to be passed to Flash Attention API
|
501 |
+
key_states (`torch.Tensor`):
|
502 |
+
Input key states to be passed to Flash Attention API
|
503 |
+
value_states (`torch.Tensor`):
|
504 |
+
Input value states to be passed to Flash Attention API
|
505 |
+
attention_mask (`torch.Tensor`):
|
506 |
+
The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
|
507 |
+
position of padding tokens and 1 for the position of non-padding tokens.
|
508 |
+
dropout (`float`):
|
509 |
+
Attention dropout
|
510 |
+
softmax_scale (`float`, *optional*):
|
511 |
+
The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
|
512 |
+
use_sliding_windows (`bool`, *optional*):
|
513 |
+
Whether to activate sliding window attention.
|
514 |
+
"""
|
515 |
+
if not self._flash_attn_uses_top_left_mask:
|
516 |
+
causal = self.is_causal
|
517 |
+
else:
|
518 |
+
# TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
|
519 |
+
causal = self.is_causal and query_length != 1
|
520 |
+
|
521 |
+
# Decide whether to use SWA or not by layer index.
|
522 |
+
if use_sliding_windows and self.layer_idx >= self.config.max_window_layers:
|
523 |
+
use_sliding_windows = False
|
524 |
+
|
525 |
+
# Contains at least one padding token in the sequence
|
526 |
+
if attention_mask is not None:
|
527 |
+
batch_size = query_states.shape[0]
|
528 |
+
query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
|
529 |
+
query_states, key_states, value_states, attention_mask, query_length
|
530 |
+
)
|
531 |
+
|
532 |
+
cu_seqlens_q, cu_seqlens_k = cu_seq_lens
|
533 |
+
max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
|
534 |
+
|
535 |
+
if not use_sliding_windows:
|
536 |
+
attn_output_unpad = flash_attn_varlen_func(
|
537 |
+
query_states,
|
538 |
+
key_states,
|
539 |
+
value_states,
|
540 |
+
cu_seqlens_q=cu_seqlens_q,
|
541 |
+
cu_seqlens_k=cu_seqlens_k,
|
542 |
+
max_seqlen_q=max_seqlen_in_batch_q,
|
543 |
+
max_seqlen_k=max_seqlen_in_batch_k,
|
544 |
+
dropout_p=dropout,
|
545 |
+
softmax_scale=softmax_scale,
|
546 |
+
causal=causal,
|
547 |
+
)
|
548 |
+
else:
|
549 |
+
attn_output_unpad = flash_attn_varlen_func(
|
550 |
+
query_states,
|
551 |
+
key_states,
|
552 |
+
value_states,
|
553 |
+
cu_seqlens_q=cu_seqlens_q,
|
554 |
+
cu_seqlens_k=cu_seqlens_k,
|
555 |
+
max_seqlen_q=max_seqlen_in_batch_q,
|
556 |
+
max_seqlen_k=max_seqlen_in_batch_k,
|
557 |
+
dropout_p=dropout,
|
558 |
+
softmax_scale=softmax_scale,
|
559 |
+
causal=causal,
|
560 |
+
window_size=(self.config.sliding_window, self.config.sliding_window),
|
561 |
+
)
|
562 |
+
|
563 |
+
attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
|
564 |
+
else:
|
565 |
+
if not use_sliding_windows:
|
566 |
+
attn_output = flash_attn_func(
|
567 |
+
query_states,
|
568 |
+
key_states,
|
569 |
+
value_states,
|
570 |
+
dropout,
|
571 |
+
softmax_scale=softmax_scale,
|
572 |
+
causal=causal,
|
573 |
+
)
|
574 |
+
else:
|
575 |
+
attn_output = flash_attn_func(
|
576 |
+
query_states,
|
577 |
+
key_states,
|
578 |
+
value_states,
|
579 |
+
dropout,
|
580 |
+
softmax_scale=softmax_scale,
|
581 |
+
causal=causal,
|
582 |
+
window_size=(self.config.sliding_window, self.config.sliding_window),
|
583 |
+
)
|
584 |
+
|
585 |
+
return attn_output
|
586 |
+
|
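# --- Illustrative sketch (not part of the original file) ---
# _upad_input below removes padding tokens so the varlen flash-attention kernel can run on a
# packed (total_tokens, heads, head_dim) tensor. The bookkeeping it relies on (_get_unpad_data)
# boils down to computing the flat token indices, the cumulative sequence lengths and the
# longest sequence from the 2D padding mask. A standalone sketch of that computation:
import torch
import torch.nn.functional as F

def get_unpad_data_demo(attention_mask: torch.Tensor):
    # attention_mask: (batch_size, seq_len) with 1 for real tokens and 0 for padding
    seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
    max_seqlen_in_batch = int(seqlens_in_batch.max())
    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
    return indices, cu_seqlens, max_seqlen_in_batch

# Example: two sequences of length 3 and 1, padded to length 3
# -> indices=[0,1,2,3], cu_seqlens=[0,3,4], max_seqlen_in_batch=3
example_mask = torch.tensor([[1, 1, 1], [1, 0, 0]])
# --- end sketch ---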
587 |
+
# Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2._upad_input
|
588 |
+
def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
|
589 |
+
batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape
|
590 |
+
|
591 |
+
# On the first iteration we need to properly re-create the padding mask
|
592 |
+
# by slicing it in the proper place
|
593 |
+
if kv_seq_len != attention_mask.shape[-1]:
|
594 |
+
attention_mask_num_tokens = attention_mask.shape[-1]
|
595 |
+
attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :]
|
596 |
+
|
597 |
+
indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
|
598 |
+
|
599 |
+
key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
|
600 |
+
value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
|
601 |
+
|
602 |
+
if query_length == kv_seq_len:
|
603 |
+
query_layer = index_first_axis(
|
604 |
+
query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k
|
605 |
+
)
|
606 |
+
cu_seqlens_q = cu_seqlens_k
|
607 |
+
max_seqlen_in_batch_q = max_seqlen_in_batch_k
|
608 |
+
indices_q = indices_k
|
609 |
+
elif query_length == 1:
|
610 |
+
max_seqlen_in_batch_q = 1
|
611 |
+
cu_seqlens_q = torch.arange(
|
612 |
+
batch_size + 1, dtype=torch.int32, device=query_layer.device
|
613 |
+
) # There is a memcpy here, that is very bad.
|
614 |
+
indices_q = cu_seqlens_q[:-1]
|
615 |
+
query_layer = query_layer.squeeze(1)
|
616 |
+
else:
|
617 |
+
# The -q_len: slice assumes left padding.
|
618 |
+
attention_mask = attention_mask[:, -query_length:]
|
619 |
+
query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
|
620 |
+
|
621 |
+
return (
|
622 |
+
query_layer,
|
623 |
+
key_layer,
|
624 |
+
value_layer,
|
625 |
+
indices_q,
|
626 |
+
(cu_seqlens_q, cu_seqlens_k),
|
627 |
+
(max_seqlen_in_batch_q, max_seqlen_in_batch_k),
|
628 |
+
)
|
629 |
+
|
630 |
+
|
631 |
+
# Copied from transformers.models.mistral.modeling_mistral.MistralSdpaAttention with Mistral->Qwen2
|
632 |
+
class Qwen2SdpaAttention(Qwen2Attention):
|
633 |
+
"""
|
634 |
+
Qwen2 attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
|
635 |
+
`Qwen2Attention` as the weights of the module stay untouched. The only changes are on the forward pass to adapt to
|
636 |
+
SDPA API.
|
637 |
+
"""
|
638 |
+
|
639 |
+
# Adapted from Qwen2Attention.forward
|
640 |
+
def forward(
|
641 |
+
self,
|
642 |
+
hidden_states: torch.Tensor,
|
643 |
+
attention_mask: Optional[torch.Tensor] = None,
|
644 |
+
position_ids: Optional[torch.LongTensor] = None,
|
645 |
+
past_key_value: Optional[Cache] = None,
|
646 |
+
output_attentions: bool = False,
|
647 |
+
use_cache: bool = False,
|
648 |
+
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
|
649 |
+
if output_attentions:
|
650 |
+
# TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
|
651 |
+
logger.warning_once(
|
652 |
+
"Qwen2Model is using Qwen2SdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
|
653 |
+
'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
|
654 |
+
)
|
655 |
+
return super().forward(
|
656 |
+
hidden_states=hidden_states,
|
657 |
+
attention_mask=attention_mask,
|
658 |
+
position_ids=position_ids,
|
659 |
+
past_key_value=past_key_value,
|
660 |
+
output_attentions=output_attentions,
|
661 |
+
use_cache=use_cache,
|
662 |
+
)
|
663 |
+
|
664 |
+
bsz, q_len, _ = hidden_states.size()
|
665 |
+
|
666 |
+
query_states = self.q_proj(hidden_states)
|
667 |
+
key_states = self.k_proj(hidden_states)
|
668 |
+
value_states = self.v_proj(hidden_states)
|
669 |
+
|
670 |
+
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
|
671 |
+
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
672 |
+
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
673 |
+
|
674 |
+
kv_seq_len = key_states.shape[-2]
|
675 |
+
if past_key_value is not None:
|
676 |
+
kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
|
677 |
+
cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
|
678 |
+
|
679 |
+
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
|
680 |
+
|
681 |
+
if past_key_value is not None:
|
682 |
+
cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
|
683 |
+
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
|
684 |
+
|
685 |
+
key_states = repeat_kv(key_states, self.num_key_value_groups)
|
686 |
+
value_states = repeat_kv(value_states, self.num_key_value_groups)
|
687 |
+
|
688 |
+
if attention_mask is not None:
|
689 |
+
if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
|
690 |
+
raise ValueError(
|
691 |
+
f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
|
692 |
+
)
|
693 |
+
|
694 |
+
# SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
|
695 |
+
# Reference: https://github.com/pytorch/pytorch/issues/112577.
|
696 |
+
if query_states.device.type == "cuda" and attention_mask is not None:
|
697 |
+
query_states = query_states.contiguous()
|
698 |
+
key_states = key_states.contiguous()
|
699 |
+
value_states = value_states.contiguous()
|
700 |
+
|
701 |
+
attn_output = torch.nn.functional.scaled_dot_product_attention(
|
702 |
+
query_states,
|
703 |
+
key_states,
|
704 |
+
value_states,
|
705 |
+
attn_mask=attention_mask,
|
706 |
+
dropout_p=self.attention_dropout if self.training else 0.0,
|
707 |
+
# The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
|
708 |
+
is_causal=self.is_causal and attention_mask is None and q_len > 1,
|
709 |
+
)
|
710 |
+
|
711 |
+
attn_output = attn_output.transpose(1, 2).contiguous()
|
712 |
+
attn_output = attn_output.view(bsz, q_len, self.hidden_size)
|
713 |
+
|
714 |
+
attn_output = self.o_proj(attn_output)
|
715 |
+
|
716 |
+
return attn_output, None, past_key_value
|
717 |
+
|
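# --- Illustrative sketch (not part of the original file) ---
# The SDPA path above either passes an explicit 4D additive mask or sets is_causal=True, but
# never both; is_causal is also disabled when q_len == 1 because a single decoded token attends
# to the whole cache and needs no causal masking. A minimal standalone call:
import torch
import torch.nn.functional as F

def sdpa_demo(q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, attention_mask: torch.Tensor = None) -> torch.Tensor:
    # q, k, v: (bsz, num_heads, seq_len, head_dim); attention_mask: additive 4D mask or None
    q_len = q.shape[-2]
    return F.scaled_dot_product_attention(
        q, k, v,
        attn_mask=attention_mask,
        dropout_p=0.0,
        is_causal=attention_mask is None and q_len > 1,
    )
# --- end sketch ---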
718 |
+
|
719 |
+
QWEN2_ATTENTION_CLASSES = {
|
720 |
+
"eager": Qwen2Attention,
|
721 |
+
"flash_attention_2": Qwen2FlashAttention2,
|
722 |
+
"sdpa": Qwen2SdpaAttention,
|
723 |
+
}
|
724 |
+
|
725 |
+
|
726 |
+
class Qwen2DecoderLayer(nn.Module):
|
727 |
+
def __init__(self, config: Qwen2Config, layer_idx: int):
|
728 |
+
super().__init__()
|
729 |
+
self.hidden_size = config.hidden_size
|
730 |
+
|
731 |
+
if config.use_sliding_window and config._attn_implementation != "flash_attention_2":
|
732 |
+
logger.warning_once(
|
733 |
+
f"Sliding Window Attention is enabled but not implemented for `{config._attn_implementation}`; "
|
734 |
+
"unexpected results may be encountered."
|
735 |
+
)
|
736 |
+
self.self_attn = QWEN2_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx)
|
737 |
+
|
738 |
+
self.mlp = Qwen2MLP(config)
|
739 |
+
self.input_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
740 |
+
self.post_attention_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
741 |
+
|
742 |
+
def forward(
|
743 |
+
self,
|
744 |
+
hidden_states: torch.Tensor,
|
745 |
+
attention_mask: Optional[torch.Tensor] = None,
|
746 |
+
position_ids: Optional[torch.LongTensor] = None,
|
747 |
+
past_key_value: Optional[Tuple[torch.Tensor]] = None,
|
748 |
+
output_attentions: Optional[bool] = False,
|
749 |
+
use_cache: Optional[bool] = False,
|
750 |
+
**kwargs,
|
751 |
+
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
|
752 |
+
if "padding_mask" in kwargs:
|
753 |
+
warnings.warn(
|
754 |
+
"Passing `padding_mask` is deprecated and will be removed in v4.37. "
|
755 |
+
"Please make sure use `attention_mask` instead.`"
|
756 |
+
)
|
757 |
+
"""
|
758 |
+
Args:
|
759 |
+
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
|
760 |
+
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
|
761 |
+
`(batch, sequence_length)` where padding elements are indicated by 0.
|
762 |
+
output_attentions (`bool`, *optional*):
|
763 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
|
764 |
+
returned tensors for more detail.
|
765 |
+
use_cache (`bool`, *optional*):
|
766 |
+
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
|
767 |
+
(see `past_key_values`).
|
768 |
+
past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
|
769 |
+
"""
|
770 |
+
|
771 |
+
residual = hidden_states
|
772 |
+
|
773 |
+
hidden_states = self.input_layernorm(hidden_states)
|
774 |
+
|
775 |
+
# Self Attention
|
776 |
+
hidden_states, self_attn_weights, present_key_value = self.self_attn(
|
777 |
+
hidden_states=hidden_states,
|
778 |
+
attention_mask=attention_mask,
|
779 |
+
position_ids=position_ids,
|
780 |
+
past_key_value=past_key_value,
|
781 |
+
output_attentions=output_attentions,
|
782 |
+
use_cache=use_cache,
|
783 |
+
)
|
784 |
+
hidden_states = residual + hidden_states
|
785 |
+
|
786 |
+
# Fully Connected
|
787 |
+
residual = hidden_states
|
788 |
+
hidden_states = self.post_attention_layernorm(hidden_states)
|
789 |
+
hidden_states = self.mlp(hidden_states)
|
790 |
+
hidden_states = residual + hidden_states
|
791 |
+
|
792 |
+
outputs = (hidden_states,)
|
793 |
+
|
794 |
+
if output_attentions:
|
795 |
+
outputs += (self_attn_weights,)
|
796 |
+
|
797 |
+
if use_cache:
|
798 |
+
outputs += (present_key_value,)
|
799 |
+
|
800 |
+
return outputs
|
801 |
+
|
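# --- Illustrative sketch (not part of the original file) ---
# Qwen2DecoderLayer above is a standard pre-norm transformer block: RMSNorm -> self-attention
# -> residual add, then RMSNorm -> MLP -> residual add. Stripped of caching and masking, the
# control flow reduces to the following (attn and mlp stand in for the real submodules):
import torch
import torch.nn as nn

class PreNormBlockDemo(nn.Module):
    def __init__(self, hidden_size: int):
        super().__init__()
        self.norm1 = nn.RMSNorm(hidden_size)  # assumption: torch>=2.4 provides nn.RMSNorm
        self.norm2 = nn.RMSNorm(hidden_size)
        self.attn = nn.MultiheadAttention(hidden_size, num_heads=8, batch_first=True)
        self.mlp = nn.Sequential(nn.Linear(hidden_size, 4 * hidden_size), nn.SiLU(), nn.Linear(4 * hidden_size, hidden_size))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        h = self.norm1(x)
        x = x + self.attn(h, h, h, need_weights=False)[0]  # self-attention + residual
        x = x + self.mlp(self.norm2(x))                    # MLP + residual
        return x
# --- end sketch ---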
802 |
+
|
803 |
+
QWEN2_START_DOCSTRING = r"""
|
804 |
+
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
|
805 |
+
library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
|
806 |
+
etc.)
|
807 |
+
|
808 |
+
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
|
809 |
+
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
|
810 |
+
and behavior.
|
811 |
+
|
812 |
+
Parameters:
|
813 |
+
config ([`Qwen2Config`]):
|
814 |
+
Model configuration class with all the parameters of the model. Initializing with a config file does not
|
815 |
+
load the weights associated with the model, only the configuration. Check out the
|
816 |
+
[`~PreTrainedModel.from_pretrained`] method to load the model weights.
|
817 |
+
"""
|
818 |
+
|
819 |
+
|
820 |
+
@add_start_docstrings(
|
821 |
+
"The bare Qwen2 Model outputting raw hidden-states without any specific head on top.",
|
822 |
+
QWEN2_START_DOCSTRING,
|
823 |
+
)
|
824 |
+
class Qwen2PreTrainedModel(PreTrainedModel):
|
825 |
+
config_class = Qwen2Config
|
826 |
+
base_model_prefix = "model"
|
827 |
+
supports_gradient_checkpointing = True
|
828 |
+
_no_split_modules = ["Qwen2DecoderLayer"]
|
829 |
+
_skip_keys_device_placement = "past_key_values"
|
830 |
+
_supports_flash_attn_2 = True
|
831 |
+
_supports_sdpa = True
|
832 |
+
_supports_cache_class = True
|
833 |
+
|
834 |
+
def _init_weights(self, module):
|
835 |
+
std = self.config.initializer_range
|
836 |
+
if isinstance(module, nn.Linear):
|
837 |
+
module.weight.data.normal_(mean=0.0, std=std)
|
838 |
+
if module.bias is not None:
|
839 |
+
module.bias.data.zero_()
|
840 |
+
elif isinstance(module, nn.Embedding):
|
841 |
+
module.weight.data.normal_(mean=0.0, std=std)
|
842 |
+
if module.padding_idx is not None:
|
843 |
+
module.weight.data[module.padding_idx].zero_()
|
844 |
+
|
845 |
+
|
846 |
+
QWEN2_INPUTS_DOCSTRING = r"""
|
847 |
+
Args:
|
848 |
+
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
|
849 |
+
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
|
850 |
+
it.
|
851 |
+
|
852 |
+
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
|
853 |
+
[`PreTrainedTokenizer.__call__`] for details.
|
854 |
+
|
855 |
+
[What are input IDs?](../glossary#input-ids)
|
856 |
+
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
|
857 |
+
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
|
858 |
+
|
859 |
+
- 1 for tokens that are **not masked**,
|
860 |
+
- 0 for tokens that are **masked**.
|
861 |
+
|
862 |
+
[What are attention masks?](../glossary#attention-mask)
|
863 |
+
|
864 |
+
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
|
865 |
+
[`PreTrainedTokenizer.__call__`] for details.
|
866 |
+
|
867 |
+
If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
|
868 |
+
`past_key_values`).
|
869 |
+
|
870 |
+
If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
|
871 |
+
and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
|
872 |
+
information on the default strategy.
|
873 |
+
|
874 |
+
- 1 indicates the head is **not masked**,
|
875 |
+
- 0 indicates the head is **masked**.
|
876 |
+
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
877 |
+
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
|
878 |
+
config.n_positions - 1]`.
|
879 |
+
|
880 |
+
[What are position IDs?](../glossary#position-ids)
|
881 |
+
past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
|
882 |
+
Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
|
883 |
+
blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
|
884 |
+
returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
|
885 |
+
|
886 |
+
Two formats are allowed:
|
887 |
+
- a [`~cache_utils.Cache`] instance;
|
888 |
+
- Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
|
889 |
+
shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
|
890 |
+
cache format.
|
891 |
+
|
892 |
+
The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
|
893 |
+
legacy cache format will be returned.
|
894 |
+
|
895 |
+
If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
|
896 |
+
have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
|
897 |
+
of shape `(batch_size, sequence_length)`.
|
898 |
+
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
|
899 |
+
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
|
900 |
+
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
|
901 |
+
model's internal embedding lookup matrix.
|
902 |
+
use_cache (`bool`, *optional*):
|
903 |
+
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
|
904 |
+
`past_key_values`).
|
905 |
+
output_attentions (`bool`, *optional*):
|
906 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
907 |
+
tensors for more detail.
|
908 |
+
output_hidden_states (`bool`, *optional*):
|
909 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
910 |
+
more detail.
|
911 |
+
return_dict (`bool`, *optional*):
|
912 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
913 |
+
"""
|
914 |
+
|
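# --- Illustrative sketch (not part of the original file) ---
# The docstring above mentions two past_key_values formats: the legacy tuple-of-tuples and the
# newer Cache object. The model below converts the legacy format on the fly with
# DynamicCache.from_legacy_cache; the round trip looks like this (transformers >= 4.36 assumed):
import torch
from transformers.cache_utils import DynamicCache

def legacy_cache_roundtrip_demo():
    num_layers, bsz, heads, seq, head_dim = 2, 1, 4, 5, 16
    legacy = tuple(
        (torch.randn(bsz, heads, seq, head_dim), torch.randn(bsz, heads, seq, head_dim))
        for _ in range(num_layers)
    )
    cache = DynamicCache.from_legacy_cache(legacy)  # tuple-of-tuples -> Cache object
    assert cache.get_seq_length() == seq
    return cache.to_legacy_cache()                  # Cache object -> tuple-of-tuples
# --- end sketch ---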
915 |
+
|
916 |
+
@add_start_docstrings(
|
917 |
+
"The bare Qwen2 Model outputting raw hidden-states without any specific head on top.",
|
918 |
+
QWEN2_START_DOCSTRING,
|
919 |
+
)
|
920 |
+
class Qwen2Model_Flash(Qwen2PreTrainedModel):
|
921 |
+
"""
|
922 |
+
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Qwen2DecoderLayer`]
|
923 |
+
|
924 |
+
Args:
|
925 |
+
config: Qwen2Config
|
926 |
+
"""
|
927 |
+
|
928 |
+
def __init__(self, config: Qwen2Config):
|
929 |
+
super().__init__(config)
|
930 |
+
self.padding_idx = config.pad_token_id
|
931 |
+
self.vocab_size = config.vocab_size
|
932 |
+
|
933 |
+
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
|
934 |
+
self.layers = nn.ModuleList(
|
935 |
+
[Qwen2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
|
936 |
+
)
|
937 |
+
self._attn_implementation = config._attn_implementation
|
938 |
+
self.norm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
939 |
+
|
940 |
+
self.gradient_checkpointing = False
|
941 |
+
|
942 |
+
# Initialize weights and apply final processing
|
943 |
+
self.post_init()
|
944 |
+
|
945 |
+
def get_input_embeddings(self):
|
946 |
+
return self.embed_tokens
|
947 |
+
|
948 |
+
def set_input_embeddings(self, value):
|
949 |
+
self.embed_tokens = value
|
950 |
+
|
951 |
+
@add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
|
952 |
+
def forward(
|
953 |
+
self,
|
954 |
+
input_ids: torch.LongTensor = None,
|
955 |
+
attention_mask: Optional[torch.Tensor] = None,
|
956 |
+
position_ids: Optional[torch.LongTensor] = None,
|
957 |
+
past_key_values: Optional[List[torch.FloatTensor]] = None,
|
958 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
959 |
+
use_cache: Optional[bool] = None,
|
960 |
+
output_attentions: Optional[bool] = None,
|
961 |
+
output_hidden_states: Optional[bool] = None,
|
962 |
+
return_dict: Optional[bool] = None,
|
963 |
+
labels: Optional[torch.Tensor] = None,
|
964 |
+
) -> Union[Tuple, BaseModelOutputWithPast]:
|
965 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
966 |
+
output_hidden_states = (
|
967 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
968 |
+
)
|
969 |
+
use_cache = use_cache if use_cache is not None else self.config.use_cache
|
970 |
+
|
971 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
972 |
+
|
973 |
+
# retrieve input_ids and inputs_embeds
|
974 |
+
if input_ids is not None and inputs_embeds is not None:
|
975 |
+
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
|
976 |
+
elif input_ids is not None:
|
977 |
+
batch_size, seq_length = input_ids.shape
|
978 |
+
elif inputs_embeds is not None:
|
979 |
+
batch_size, seq_length, _ = inputs_embeds.shape
|
980 |
+
else:
|
981 |
+
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
|
982 |
+
|
983 |
+
if self.gradient_checkpointing and self.training:
|
984 |
+
if use_cache:
|
985 |
+
logger.warning_once(
|
986 |
+
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
|
987 |
+
)
|
988 |
+
use_cache = False
|
989 |
+
|
990 |
+
past_key_values_length = 0
|
991 |
+
|
992 |
+
if use_cache:
|
993 |
+
use_legacy_cache = not isinstance(past_key_values, Cache)
|
994 |
+
if use_legacy_cache:
|
995 |
+
past_key_values = DynamicCache.from_legacy_cache(past_key_values)
|
996 |
+
past_key_values_length = past_key_values.get_usable_length(seq_length)
|
997 |
+
|
998 |
+
if position_ids is None:
|
999 |
+
device = input_ids.device if input_ids is not None else inputs_embeds.device
|
1000 |
+
position_ids = torch.arange(
|
1001 |
+
past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
|
1002 |
+
)
|
1003 |
+
position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
|
1004 |
+
else:
|
1005 |
+
position_ids = position_ids.view(-1, seq_length).long()
|
1006 |
+
|
1007 |
+
if inputs_embeds is None:
|
1008 |
+
inputs_embeds = self.embed_tokens(input_ids)
|
1009 |
+
|
1010 |
+
if attention_mask is not None and self._attn_implementation == "flash_attention_2" and use_cache:
|
1011 |
+
is_padding_right = attention_mask[:, -1].sum().item() != batch_size
|
1012 |
+
if is_padding_right:
|
1013 |
+
raise ValueError(
|
1014 |
+
"You are attempting to perform batched generation with padding_side='right'"
|
1015 |
+
" this may lead to unexpected behaviour for Flash Attention version of Qwen2. Make sure to "
|
1016 |
+
" call `tokenizer.padding_side = 'left'` before tokenizing the input. "
|
1017 |
+
)
|
1018 |
+
|
1019 |
+
if self._attn_implementation == "flash_attention_2":
|
1020 |
+
# 2d mask is passed through the layers
|
1021 |
+
attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
|
1022 |
+
elif self._attn_implementation == "sdpa" and not output_attentions:
|
1023 |
+
# output_attentions=True cannot be supported when using SDPA, and we fall back on
|
1024 |
+
# the manual implementation that requires a 4D causal mask in all cases.
|
1025 |
+
attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
|
1026 |
+
attention_mask,
|
1027 |
+
(batch_size, seq_length),
|
1028 |
+
inputs_embeds,
|
1029 |
+
past_key_values_length,
|
1030 |
+
)
|
1031 |
+
else:
|
1032 |
+
# 4d mask is passed through the layers
|
1033 |
+
attention_mask = _prepare_4d_causal_attention_mask(
|
1034 |
+
attention_mask,
|
1035 |
+
(batch_size, seq_length),
|
1036 |
+
inputs_embeds,
|
1037 |
+
past_key_values_length,
|
1038 |
+
sliding_window=self.config.sliding_window,
|
1039 |
+
)
|
1040 |
+
|
1041 |
+
hidden_states = inputs_embeds
|
1042 |
+
|
1043 |
+
# decoder layers
|
1044 |
+
all_hidden_states = () if output_hidden_states else None
|
1045 |
+
all_self_attns = () if output_attentions else None
|
1046 |
+
next_decoder_cache = None
|
1047 |
+
|
1048 |
+
for layer_idx, decoder_layer in enumerate(self.layers):
|
1049 |
+
if output_hidden_states:
|
1050 |
+
all_hidden_states += (hidden_states,)
|
1051 |
+
|
1052 |
+
if self.gradient_checkpointing and self.training:
|
1053 |
+
layer_outputs = self._gradient_checkpointing_func(
|
1054 |
+
decoder_layer.__call__,
|
1055 |
+
hidden_states,
|
1056 |
+
attention_mask,
|
1057 |
+
position_ids,
|
1058 |
+
past_key_values,
|
1059 |
+
output_attentions,
|
1060 |
+
use_cache,
|
1061 |
+
)
|
1062 |
+
else:
|
1063 |
+
layer_outputs = decoder_layer(
|
1064 |
+
hidden_states,
|
1065 |
+
attention_mask=attention_mask,
|
1066 |
+
position_ids=position_ids,
|
1067 |
+
past_key_value=past_key_values,
|
1068 |
+
output_attentions=output_attentions,
|
1069 |
+
use_cache=use_cache,
|
1070 |
+
)
|
1071 |
+
|
1072 |
+
hidden_states = layer_outputs[0]
|
1073 |
+
|
1074 |
+
if use_cache:
|
1075 |
+
next_decoder_cache = layer_outputs[2 if output_attentions else 1]
|
1076 |
+
|
1077 |
+
if output_attentions:
|
1078 |
+
all_self_attns += (layer_outputs[1],)
|
1079 |
+
|
1080 |
+
###### copy from pdrop #########
|
1081 |
+
# rank & drop after specific layer
|
1082 |
+
# only drop in prefill stage when inference
|
1083 |
+
rank_layer = layer_idx+1
|
1084 |
+
if rank_layer in self.llm_compress_layer_list:
|
1085 |
+
if hidden_states.shape[1] != 1: # prefill stage or training
|
1086 |
+
stage = self.llm_compress_layer_list.index(rank_layer) # determine current stage
|
1087 |
+
(
|
1088 |
+
position_ids,
|
1089 |
+
attention_mask,
|
1090 |
+
hidden_states,
|
1091 |
+
labels # update labels and return
|
1092 |
+
) = self.video_level_compress(
|
1093 |
+
cur_num = stage,
|
1094 |
+
rank_layer = rank_layer,
|
1095 |
+
features = hidden_states,
|
1096 |
+
position_ids=position_ids,
|
1097 |
+
attention_mask=attention_mask,
|
1098 |
+
labels = labels
|
1099 |
+
)
|
1100 |
+
|
1101 |
+
# process attention_mask again after updating
|
1102 |
+
if self._attn_implementation == "flash_attention_2":
|
1103 |
+
# 2d mask is passed through the layers
|
1104 |
+
attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
|
1105 |
+
elif self._attn_implementation == "sdpa" and not output_attentions:
|
1106 |
+
# output_attentions=True can not be supported when using SDPA, and we fall back on
|
1107 |
+
# the manual implementation that requires a 4D causal mask in all cases.
|
1108 |
+
attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
|
1109 |
+
attention_mask,
|
1110 |
+
(batch_size, hidden_states.shape[1]),
|
1111 |
+
hidden_states,
|
1112 |
+
past_key_values_length,
|
1113 |
+
)
|
1114 |
+
else:
|
1115 |
+
# 4d mask is passed through the layers
|
1116 |
+
attention_mask = _prepare_4d_causal_attention_mask(
|
1117 |
+
attention_mask,
|
1118 |
+
(batch_size, hidden_states.shape[1]),
|
1119 |
+
hidden_states,
|
1120 |
+
past_key_values_length,
|
1121 |
+
sliding_window=self.config.sliding_window,
|
1122 |
+
)
|
1123 |
+
|
1124 |
+
else:
|
1125 |
+
# update position_ids in decoding stage when inference
|
1126 |
+
stage = self.llm_compress_layer_list.index(rank_layer) # determine current stage
|
1127 |
+
cur_visual_length = [int(cur_image_token * self.llm_image_token_ratio_list[stage]) for cur_image_token in self.num_image_token_lens]
|
1128 |
+
next_visual_length = [int(cur_image_token * self.llm_image_token_ratio_list[stage + 1]) for cur_image_token in self.num_image_token_lens]
|
1129 |
+
new_position_ids = []
|
1130 |
+
for idx, cur_position_ids in enumerate(position_ids):
|
1131 |
+
cur_position_ids = cur_position_ids - (cur_visual_length[idx] - next_visual_length[idx])
|
1132 |
+
new_position_ids.append(cur_position_ids)
|
1133 |
+
assert idx == 0, idx
|
1134 |
+
position_ids = torch.tensor(new_position_ids, dtype=torch.long).unsqueeze(0)
|
1135 |
+
|
1136 |
+
#################
|
1137 |
+
|
1138 |
+
hidden_states = self.norm(hidden_states)
|
1139 |
+
|
1140 |
+
# add hidden states from the last decoder layer
|
1141 |
+
if output_hidden_states:
|
1142 |
+
all_hidden_states += (hidden_states,)
|
1143 |
+
|
1144 |
+
next_cache = None
|
1145 |
+
if use_cache:
|
1146 |
+
next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
|
1147 |
+
|
1148 |
+
if not return_dict:
|
1149 |
+
return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None), labels
|
1150 |
+
return BaseModelOutputWithPast(
|
1151 |
+
last_hidden_state=hidden_states,
|
1152 |
+
past_key_values=next_cache,
|
1153 |
+
hidden_states=all_hidden_states,
|
1154 |
+
attentions=all_self_attns,
|
1155 |
+
), labels
|
1156 |
+
|
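# --- Illustrative sketch (not part of the original file) ---
# video_level_compress below ranks the visual tokens by how much attention the last instruction
# token pays to them, keeps the top-k in their original order, and drops the rest (uniform
# subsampling is the alternative for the first stage). The core "rank & drop" step, isolated
# from the batching/padding logic, is roughly:
import torch

def rank_and_drop_demo(features: torch.Tensor, scores: torch.Tensor, image_start: int, num_image_tokens: int, keep: int) -> torch.Tensor:
    # features: (seq_len, hidden); scores: (num_image_tokens,) attention received by each visual token
    top_index = scores.topk(keep).indices.sort().values + image_start  # keep temporal order
    image_end = image_start + num_image_tokens
    return torch.cat([features[:image_start], features[top_index], features[image_end:]], dim=0)

# Uniform alternative used for the first compression stage ('uniform0_attention'):
# top_index = torch.linspace(0, num_image_tokens - 1, keep, dtype=torch.long) + image_start
# --- end sketch ---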
1157 |
+
|
1158 |
+
# implementation of pdrop
|
1159 |
+
def video_level_compress(
|
1160 |
+
self, cur_num, rank_layer, features ,
|
1161 |
+
position_ids, attention_mask, labels
|
1162 |
+
):
|
1163 |
+
|
1164 |
+
if self.llm_compress_type == 'uniform0_attention':
|
1165 |
+
if cur_num == 0:
|
1166 |
+
llm_compress_type = 'uniform'
|
1167 |
+
else:
|
1168 |
+
llm_compress_type = 'attention'
|
1169 |
+
else:
|
1170 |
+
llm_compress_type = self.llm_compress_type
|
1171 |
+
|
1172 |
+
_labels = labels
|
1173 |
+
_position_ids = position_ids
|
1174 |
+
_attention_mask = attention_mask
|
1175 |
+
|
1176 |
+
if position_ids is None:
|
1177 |
+
position_ids = torch.arange(0, features.shape[1], dtype=torch.long, device=features.device).unsqueeze(0)
|
1178 |
+
|
1179 |
+
if getattr(self.config, 'tokenizer_padding_side', 'right') == "right":
|
1180 |
+
|
1181 |
+
batch_size = features.shape[0]
|
1182 |
+
image_tokens = [int(cur_image_token * self.llm_image_token_ratio_list[cur_num]) for cur_image_token in self.num_image_token_lens]
|
1183 |
+
keep_length = [int(cur_image_token * self.llm_image_token_ratio_list[cur_num + 1]) for cur_image_token in self.num_image_token_lens]
|
1184 |
+
|
1185 |
+
features_list = []
|
1186 |
+
attention_mask_list = []
|
1187 |
+
labels_list = []
|
1188 |
+
|
1189 |
+
if attention_mask is None:
|
1190 |
+
attention_mask = torch.ones((batch_size,features.shape[1]), dtype=torch.bool, device=features.device)
|
1191 |
+
else:
|
1192 |
+
attention_mask = attention_mask.bool()
|
1193 |
+
if labels is None:
|
1194 |
+
labels = torch.full((batch_size,features.shape[1]), IGNORE_INDEX, device=features.device)
|
1195 |
+
|
1196 |
+
|
1197 |
+
if 'attention' in llm_compress_type:
|
1198 |
+
# obtain query_states and key_states to calculate attention map
|
1199 |
+
hidden_states= features.clone().detach()
|
1200 |
+
|
1201 |
+
self_attn = self.layers[rank_layer].self_attn
|
1202 |
+
hidden_states = self.layers[rank_layer].input_layernorm(hidden_states)
|
1203 |
+
|
1204 |
+
num_heads = self_attn.num_heads
|
1205 |
+
num_key_value_heads = self_attn.num_key_value_heads
|
1206 |
+
head_dim = self_attn.head_dim
|
1207 |
+
|
1208 |
+
bsz, q_len, _ = hidden_states.size()
|
1209 |
+
|
1210 |
+
query_states = self_attn.q_proj(hidden_states)
|
1211 |
+
key_states = self_attn.k_proj(hidden_states)
|
1212 |
+
value_states = self_attn.v_proj(hidden_states)
|
1213 |
+
|
1214 |
+
query_states = query_states.view(bsz, q_len, num_heads, head_dim).transpose(1, 2)
|
1215 |
+
key_states = key_states.view(bsz, q_len, num_key_value_heads, head_dim).transpose(1, 2)
|
1216 |
+
value_states = value_states.view(bsz, q_len, num_key_value_heads, head_dim).transpose(1, 2)
|
1217 |
+
|
1218 |
+
kv_seq_len = key_states.shape[-2]
|
1219 |
+
cos, sin = self_attn.rotary_emb(value_states, seq_len=kv_seq_len)
|
1220 |
+
|
1221 |
+
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
|
1222 |
+
key_states = repeat_kv(key_states, self_attn.num_key_value_groups)
|
1223 |
+
|
1224 |
+
# attention_mask
|
1225 |
+
eager_attention_mask = _prepare_4d_causal_attention_mask(
|
1226 |
+
attention_mask, (batch_size, q_len), hidden_states, past_key_values_length=0
|
1227 |
+
).to(device=query_states.device)
|
1228 |
+
|
1229 |
+
# take valid features
|
1230 |
+
features = [cur_features[cur_attention_mask] for cur_features, cur_attention_mask in zip(features, attention_mask)]
|
1231 |
+
labels = [cur_labels[cur_attention_mask] for cur_labels, cur_attention_mask in zip(labels, attention_mask)]
|
1232 |
+
attention_mask = [cur_attention_mask[cur_attention_mask] for cur_attention_mask in attention_mask]
|
1233 |
+
|
1234 |
+
# rank & drop
|
1235 |
+
for i in range(batch_size):
|
1236 |
+
image_index = self.first_image_token_position[i]
|
1237 |
+
if image_index == -1:
|
1238 |
+
cur_input_embeds = features[i]
|
1239 |
+
features_list.append(cur_input_embeds)
|
1240 |
+
attention_mask_list.append(attention_mask[i])
|
1241 |
+
labels_list.append(labels[i])
|
1242 |
+
continue
|
1243 |
+
|
1244 |
+
if 'attention' in llm_compress_type:
|
1245 |
+
|
1246 |
+
# obtain current states
|
1247 |
+
cur_key_states = key_states[i]
|
1248 |
+
cur_query_states = query_states[i]
|
1249 |
+
cur_eager_attention_mask = eager_attention_mask[i]
|
1250 |
+
|
1251 |
+
# choose last instruction token as query
|
1252 |
+
if self.training:
|
1253 |
+
answer_index = torch.where(labels[i] != -100)[0].tolist()
|
1254 |
+
index_before_answer = []
|
1255 |
+
for index in answer_index:
|
1256 |
+
if labels[i][index-1] == -100:
|
1257 |
+
index_before_answer.append(index-1)
|
1258 |
+
if index_before_answer == []:
|
1259 |
+
cur_input_embeds = features[i]
|
1260 |
+
features_list.append(cur_input_embeds)
|
1261 |
+
attention_mask_list.append(attention_mask[i])
|
1262 |
+
labels_list.append(labels[i])
|
1263 |
+
continue
|
1264 |
+
|
1265 |
+
index_before_answer=torch.tensor(index_before_answer,device=labels[0].device)
|
1266 |
+
text_query_states = cur_query_states[:,index_before_answer,:]
|
1267 |
+
text_eager_attention_mask = cur_eager_attention_mask[:,index_before_answer,:]
|
1268 |
+
|
1269 |
+
else:
|
1270 |
+
prompt_total_len = self.text_prompt_lens[i] + image_tokens[i]
|
1271 |
+
text_query_states = cur_query_states[:,prompt_total_len-1,:].unsqueeze(1)
|
1272 |
+
text_eager_attention_mask = cur_eager_attention_mask[:,prompt_total_len-1,:].unsqueeze(1)
|
1273 |
+
|
1274 |
+
# calculate attention map
|
1275 |
+
attn_weights = torch.matmul(text_query_states, cur_key_states.transpose(1, 2)) / math.sqrt(head_dim) #(num_head, text_token,seq_len)
|
1276 |
+
attn_weights = attn_weights + text_eager_attention_mask
|
1277 |
+
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) #(num_head, text_token,seq_len)
|
1278 |
+
|
1279 |
+
attention_avg_head = torch.mean(attn_weights, dim=0) # ave across heads
|
1280 |
+
attention_avg_head = attention_avg_head[:,image_index:image_index+image_tokens[i]] # select image token as keys
|
1281 |
+
attention_avg_text = torch.mean(attention_avg_head, dim=0) # (576)
|
1282 |
+
|
1283 |
+
if llm_compress_type == 'attention':
|
1284 |
+
top_rank_index = attention_avg_text.topk(keep_length[i]).indices
|
1285 |
+
else:
|
1286 |
+
raise NotImplementedError(llm_compress_type)
|
1287 |
+
|
1288 |
+
elif llm_compress_type == 'uniform':
|
1289 |
+
top_rank_index = torch.linspace(0, image_tokens[i]-1, keep_length[i], dtype=torch.long)
|
1290 |
+
else:
|
1291 |
+
raise NotImplementedError(llm_compress_type)
|
1292 |
+
|
1293 |
+
top_rank_index = top_rank_index + image_index
|
1294 |
+
top_rank_index= top_rank_index.sort().values
|
1295 |
+
|
1296 |
+
start_index = image_index + image_tokens[i]
|
1297 |
+
new_input_embeds = torch.cat([features[i][ :image_index, :] ,features[i][ top_rank_index, :], features[i][start_index:, :]], dim=0)
|
1298 |
+
|
1299 |
+
new_labels = torch.cat([labels[i][ :image_index],labels[i][ top_rank_index], labels[i][start_index:]], dim=0)
|
1300 |
+
new_attention_mask = torch.cat([attention_mask[i][:image_index], attention_mask[i][top_rank_index], attention_mask[i][start_index:]], dim=0)
|
1301 |
+
|
1302 |
+
features_list.append(new_input_embeds)
|
1303 |
+
attention_mask_list.append(new_attention_mask)
|
1304 |
+
labels_list.append(new_labels)
|
1305 |
+
|
1306 |
+
# Truncate sequences to max length as image embeddings can make the sequence longer
|
1307 |
+
tokenizer_model_max_length = getattr(self.config, 'tokenizer_model_max_length', None)
|
1308 |
+
if tokenizer_model_max_length is not None:
|
1309 |
+
new_input_embeds = [x[:tokenizer_model_max_length] for x in features_list]
|
1310 |
+
new_attention_mask = [x[:tokenizer_model_max_length] for x in attention_mask_list]
|
1311 |
+
new_labels = [x[:tokenizer_model_max_length] for x in labels_list]
|
1312 |
+
|
1313 |
+
max_len = max(x.shape[0] for x in new_input_embeds)
|
1314 |
+
|
1315 |
+
# padding the sequences to form batch
|
1316 |
+
embeds_padded=[]
|
1317 |
+
labels_padded=[]
|
1318 |
+
attention_mask_padded=[]
|
1319 |
+
position_ids = torch.zeros((batch_size, max_len), dtype=position_ids.dtype, device=position_ids.device)
|
1320 |
+
for i, (cur_new_embed, cur_new_labels) in enumerate(zip(new_input_embeds, new_labels)):
|
1321 |
+
cur_len_emb=cur_new_embed.shape[0]
|
1322 |
+
dif=max_len - cur_len_emb # padding to longest seq
|
1323 |
+
|
1324 |
+
cur_new_embed = torch.cat([cur_new_embed,torch.zeros((dif, cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)],dim=0)
|
1325 |
+
cur_new_labels = torch.cat([cur_new_labels,torch.full((dif,),IGNORE_INDEX,dtype=cur_new_labels.dtype, device=cur_new_labels.device)],dim=0)
|
1326 |
+
cur_attention_mask = new_attention_mask[i]
|
1327 |
+
cur_attention_mask = torch.cat([cur_attention_mask,torch.full((dif,),False, dtype=cur_attention_mask.dtype, device=cur_attention_mask.device)],dim=0)
|
1328 |
+
|
1329 |
+
embeds_padded.append(cur_new_embed)
|
1330 |
+
labels_padded.append(cur_new_labels)
|
1331 |
+
attention_mask_padded.append(cur_attention_mask)
|
1332 |
+
|
1333 |
+
cur_len = new_attention_mask[i].sum().item()
|
1334 |
+
position_ids[i, :cur_len] = torch.arange(0, cur_len, dtype=position_ids.dtype, device=position_ids.device)
|
1335 |
+
|
1336 |
+
|
1337 |
+
new_input_embeds = torch.stack(embeds_padded,dim=0)
|
1338 |
+
new_input_embeds = new_input_embeds.to(features[0].dtype)
|
1339 |
+
|
1340 |
+
new_attention_mask = torch.stack(attention_mask_padded,dim=0)
|
1341 |
+
new_labels = torch.stack(labels_padded,dim=0)
|
1342 |
+
|
1343 |
+
if _position_ids is None:
|
1344 |
+
position_ids = None
|
1345 |
+
if _labels is None:
|
1346 |
+
new_labels = None
|
1347 |
+
|
1348 |
+
if _attention_mask is None:
|
1349 |
+
new_attention_mask = None
|
1350 |
+
else:
|
1351 |
+
new_attention_mask = new_attention_mask.to(dtype=_attention_mask.dtype)
|
1352 |
+
|
1353 |
+
return position_ids, new_attention_mask, new_input_embeds, new_labels
|
1354 |
+
|
1355 |
+
else:
|
1356 |
+
raise ValueError(f"Unexpected tokenizer_padding_side: {self.config.tokenizer_padding_side}")
|
1357 |
+
|
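# --- Illustrative sketch (not part of the original file) ---
# After dropping visual tokens, the sequences in a batch no longer share a length, so
# video_level_compress above right-pads embeddings with zeros, labels with IGNORE_INDEX, and
# the attention mask with False before stacking. The padding step in isolation:
import torch

IGNORE_INDEX_DEMO = -100  # assumption: mirrors the IGNORE_INDEX constant used above

def pad_and_stack_demo(embeds: list, labels: list):
    # embeds: list of (len_i, hidden) tensors; labels: list of (len_i,) tensors
    max_len = max(e.shape[0] for e in embeds)
    padded_embeds, padded_labels, masks = [], [], []
    for e, l in zip(embeds, labels):
        pad = max_len - e.shape[0]
        padded_embeds.append(torch.cat([e, e.new_zeros(pad, e.shape[1])], dim=0))
        padded_labels.append(torch.cat([l, l.new_full((pad,), IGNORE_INDEX_DEMO)], dim=0))
        masks.append(torch.cat([torch.ones(e.shape[0], dtype=torch.bool), torch.zeros(pad, dtype=torch.bool)]))
    return torch.stack(padded_embeds), torch.stack(padded_labels), torch.stack(masks)
# --- end sketch ---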
1358 |
+
|
1359 |
+
class Qwen2ForCausalLM_Flash(Qwen2PreTrainedModel):
|
1360 |
+
_tied_weights_keys = ["lm_head.weight"]
|
1361 |
+
|
1362 |
+
def __init__(self, config):
|
1363 |
+
super().__init__(config)
|
1364 |
+
self.model = Qwen2Model_Flash(config)
|
1365 |
+
self.vocab_size = config.vocab_size
|
1366 |
+
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
|
1367 |
+
|
1368 |
+
# Initialize weights and apply final processing
|
1369 |
+
self.post_init()
|
1370 |
+
|
1371 |
+
def get_input_embeddings(self):
|
1372 |
+
return self.model.embed_tokens
|
1373 |
+
|
1374 |
+
def set_input_embeddings(self, value):
|
1375 |
+
self.model.embed_tokens = value
|
1376 |
+
|
1377 |
+
def get_output_embeddings(self):
|
1378 |
+
return self.lm_head
|
1379 |
+
|
1380 |
+
def set_output_embeddings(self, new_embeddings):
|
1381 |
+
self.lm_head = new_embeddings
|
1382 |
+
|
1383 |
+
def set_decoder(self, decoder):
|
1384 |
+
self.model = decoder
|
1385 |
+
|
1386 |
+
def get_decoder(self):
|
1387 |
+
return self.model
|
1388 |
+
|
1389 |
+
@add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
|
1390 |
+
@replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
|
1391 |
+
def forward(
|
1392 |
+
self,
|
1393 |
+
input_ids: torch.LongTensor = None,
|
1394 |
+
attention_mask: Optional[torch.Tensor] = None,
|
1395 |
+
position_ids: Optional[torch.LongTensor] = None,
|
1396 |
+
past_key_values: Optional[List[torch.FloatTensor]] = None,
|
1397 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
1398 |
+
labels: Optional[torch.LongTensor] = None,
|
1399 |
+
use_cache: Optional[bool] = None,
|
1400 |
+
output_attentions: Optional[bool] = None,
|
1401 |
+
output_hidden_states: Optional[bool] = None,
|
1402 |
+
return_dict: Optional[bool] = None,
|
1403 |
+
) -> Union[Tuple, CausalLMOutputWithPast]:
|
1404 |
+
r"""
|
1405 |
+
Args:
|
1406 |
+
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
1407 |
+
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
|
1408 |
+
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
|
1409 |
+
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
|
1410 |
+
|
1411 |
+
Returns:
|
1412 |
+
|
1413 |
+
Example:
|
1414 |
+
|
1415 |
+
```python
|
1416 |
+
>>> from transformers import AutoTokenizer, Qwen2ForCausalLM
|
1417 |
+
|
1418 |
+
>>> model = Qwen2ForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
|
1419 |
+
>>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
|
1420 |
+
|
1421 |
+
>>> prompt = "Hey, are you conscious? Can you talk to me?"
|
1422 |
+
>>> inputs = tokenizer(prompt, return_tensors="pt")
|
1423 |
+
|
1424 |
+
>>> # Generate
|
1425 |
+
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
|
1426 |
+
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
|
1427 |
+
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
|
1428 |
+
```"""
|
1429 |
+
|
1430 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
1431 |
+
output_hidden_states = (
|
1432 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
1433 |
+
)
|
1434 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
1435 |
+
|
1436 |
+
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
|
1437 |
+
outputs, labels = self.model(
|
1438 |
+
input_ids=input_ids,
|
1439 |
+
attention_mask=attention_mask,
|
1440 |
+
position_ids=position_ids,
|
1441 |
+
past_key_values=past_key_values,
|
1442 |
+
inputs_embeds=inputs_embeds,
|
1443 |
+
use_cache=use_cache,
|
1444 |
+
output_attentions=output_attentions,
|
1445 |
+
output_hidden_states=output_hidden_states,
|
1446 |
+
return_dict=return_dict,
|
1447 |
+
labels=labels
|
1448 |
+
)
|
1449 |
+
|
1450 |
+
hidden_states = outputs[0]
|
1451 |
+
logits = self.lm_head(hidden_states)
|
1452 |
+
logits = logits.float()
|
1453 |
+
|
1454 |
+
loss = None
|
1455 |
+
if labels is not None:
|
1456 |
+
# Shift so that tokens < n predict n
|
1457 |
+
shift_logits = logits[..., :-1, :].contiguous()
|
1458 |
+
shift_labels = labels[..., 1:].contiguous()
|
1459 |
+
# Flatten the tokens
|
1460 |
+
loss_fct = CrossEntropyLoss()
|
1461 |
+
shift_logits = shift_logits.view(-1, self.config.vocab_size)
|
1462 |
+
shift_labels = shift_labels.view(-1)
|
1463 |
+
# Enable model parallelism
|
1464 |
+
shift_labels = shift_labels.to(shift_logits.device)
|
1465 |
+
loss = loss_fct(shift_logits, shift_labels)
|
1466 |
+
|
1467 |
+
if not return_dict:
|
1468 |
+
output = (logits,) + outputs[1:]
|
1469 |
+
return (loss,) + output if loss is not None else output
|
1470 |
+
|
1471 |
+
return CausalLMOutputWithPast(
|
1472 |
+
loss=loss,
|
1473 |
+
logits=logits,
|
1474 |
+
past_key_values=outputs.past_key_values,
|
1475 |
+
hidden_states=outputs.hidden_states,
|
1476 |
+
attentions=outputs.attentions,
|
1477 |
+
)
|
1478 |
+
|
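# --- Illustrative sketch (not part of the original file) ---
# The loss in forward() above is the standard next-token objective: logits at position t are
# scored against the label at position t+1, and positions labelled -100 (IGNORE_INDEX) are
# skipped. Stripped of model-parallel details, the shift-and-flatten is:
import torch
from torch.nn import CrossEntropyLoss

def causal_lm_loss_demo(logits: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
    # logits: (bsz, seq_len, vocab_size); labels: (bsz, seq_len) with -100 for ignored positions
    shift_logits = logits[..., :-1, :].contiguous()
    shift_labels = labels[..., 1:].contiguous().to(shift_logits.device)
    loss_fct = CrossEntropyLoss()  # ignore_index defaults to -100
    return loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
# --- end sketch ---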
1479 |
+
def prepare_inputs_for_generation(
|
1480 |
+
self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
|
1481 |
+
):
|
1482 |
+
# Omit tokens covered by past_key_values
|
1483 |
+
if past_key_values is not None:
|
1484 |
+
if isinstance(past_key_values, Cache):
|
1485 |
+
cache_length = past_key_values.get_seq_length()
|
1486 |
+
past_length = past_key_values.seen_tokens
|
1487 |
+
max_cache_length = past_key_values.get_max_length()
|
1488 |
+
else:
|
1489 |
+
cache_length = past_length = past_key_values[0][0].shape[2]
|
1490 |
+
max_cache_length = None
|
1491 |
+
|
1492 |
+
# Keep only the unprocessed tokens:
|
1493 |
+
# 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
|
1494 |
+
# some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
|
1495 |
+
# input)
|
1496 |
+
if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
|
1497 |
+
input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
|
1498 |
+
# 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
|
1499 |
+
# input_ids based on the past_length.
|
1500 |
+
elif past_length < input_ids.shape[1]:
|
1501 |
+
input_ids = input_ids[:, past_length:]
|
1502 |
+
# 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
|
1503 |
+
|
1504 |
+
# If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
|
1505 |
+
if (
|
1506 |
+
max_cache_length is not None
|
1507 |
+
and attention_mask is not None
|
1508 |
+
and cache_length + input_ids.shape[1] > max_cache_length
|
1509 |
+
):
|
1510 |
+
attention_mask = attention_mask[:, -max_cache_length:]
|
1511 |
+
|
1512 |
+
position_ids = kwargs.get("position_ids", None)
|
1513 |
+
if attention_mask is not None and position_ids is None:
|
1514 |
+
# create position_ids on the fly for batch generation
|
1515 |
+
position_ids = attention_mask.long().cumsum(-1) - 1
|
1516 |
+
position_ids.masked_fill_(attention_mask == 0, 1)
|
1517 |
+
if past_key_values:
|
1518 |
+
position_ids = position_ids[:, -input_ids.shape[1] :]
|
1519 |
+
|
1520 |
+
# if `inputs_embeds` are passed, we only want to use them in the 1st generation step
|
1521 |
+
if inputs_embeds is not None and past_key_values is None:
|
1522 |
+
model_inputs = {"inputs_embeds": inputs_embeds}
|
1523 |
+
else:
|
1524 |
+
model_inputs = {"input_ids": input_ids}
|
1525 |
+
|
1526 |
+
model_inputs.update(
|
1527 |
+
{
|
1528 |
+
"position_ids": position_ids,
|
1529 |
+
"past_key_values": past_key_values,
|
1530 |
+
"use_cache": kwargs.get("use_cache"),
|
1531 |
+
"attention_mask": attention_mask,
|
1532 |
+
}
|
1533 |
+
)
|
1534 |
+
return model_inputs
|
1535 |
+
|
1536 |
+
@staticmethod
|
1537 |
+
def _reorder_cache(past_key_values, beam_idx):
|
1538 |
+
reordered_past = ()
|
1539 |
+
for layer_past in past_key_values:
|
1540 |
+
reordered_past += (
|
1541 |
+
tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
|
1542 |
+
)
|
1543 |
+
return reordered_past
|
1544 |
+
|
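# --- Illustrative sketch (not part of the original file) ---
# _reorder_cache above permutes every cached key/value tensor along the batch dimension so the
# cache follows the surviving beams during beam search. The per-tensor operation is just an
# index_select on dim 0:
import torch

def reorder_beam_demo() -> torch.Tensor:
    past_state = torch.randn(4, 2, 5, 16)    # (num_beams, heads, seq, head_dim)
    beam_idx = torch.tensor([2, 2, 0, 1])    # beams kept at this decoding step
    return past_state.index_select(0, beam_idx)
# --- end sketch ---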
1545 |
+
|
modeling_videochat_flash.py
ADDED
@@ -0,0 +1,713 @@
# Copyright 2024
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from abc import ABC, abstractmethod
import re
import torch
import torch.nn as nn
import random
from typing import List, Optional, Tuple, Union, Dict

from transformers import AutoConfig, AutoModelForCausalLM
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.generation.utils import GenerateOutput
from transformers import Qwen2Config

from .vision_tower_builder import build_vision_tower
from .mm_projector_builder import build_vision_projector

from .constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN, DEFAULT_IMAGE_TOKEN
from .conversation import conv_templates, SeparatorStyle
from .mm_utils import tokenizer_image_token, KeywordsStoppingCriteria, get_anyres_image_grid_shape, load_video
from .modeling_qwen2_flash import Qwen2Model_Flash, Qwen2ForCausalLM_Flash


class LlavaMetaModel:

    def __init__(self, config):
        super(LlavaMetaModel, self).__init__(config)

        if hasattr(config, "mm_vision_tower"):
            delay_load = getattr(config, "delay_load", False)
            self.vision_tower = build_vision_tower(config, delay_load=delay_load)
            self.mm_projector = build_vision_projector(config, vision_cfg=self.vision_tower.config)

            if "unpad" in getattr(config, "mm_patch_merge_type", ""):
                self.image_newline = nn.Parameter(torch.empty(config.hidden_size, dtype=self.dtype))
            if "nopad" in getattr(config, "mm_patch_merge_type", "") and getattr(self.config, "mm_newline_position", "nothing") != "nothing":
                self.frame_newline = nn.Parameter(torch.empty(config.hidden_size, dtype=self.dtype))

    def get_vision_tower(self):
        vision_tower = getattr(self, "vision_tower", None)
        if type(vision_tower) is list:
            vision_tower = vision_tower[0]
        return vision_tower

    def initialize_vision_modules(self, model_args, fsdp=None):
        vision_tower = model_args.vision_tower
        mm_vision_select_layer = model_args.mm_vision_select_layer
        mm_vision_select_feature = model_args.mm_vision_select_feature
        pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter
        mm_patch_merge_type = model_args.mm_patch_merge_type

        self.config.mm_vision_tower = vision_tower
        self.config.vision_tower_pretrained = getattr(model_args, "vision_tower_pretrained", "")

        if self.get_vision_tower() is None:
            vision_tower = build_vision_tower(model_args)

            if fsdp is not None and len(fsdp) > 0:
                self.vision_tower = [vision_tower]
            else:
                self.vision_tower = vision_tower
        else:
            if fsdp is not None and len(fsdp) > 0:
                vision_tower = self.vision_tower[0]
            else:
                vision_tower = self.vision_tower
            vision_tower.load_model()

        self.config.use_mm_proj = True
        self.config.mm_projector_type = getattr(model_args, "mm_projector_type", "linear")
        self.config.mm_vision_select_layer = mm_vision_select_layer
        self.config.mm_vision_select_feature = mm_vision_select_feature
        self.config.mm_patch_merge_type = mm_patch_merge_type

        if getattr(self, "mm_projector", None) is None:
            self.mm_projector = build_vision_projector(self.config, vision_cfg=vision_tower.config)

            if "unpad" in mm_patch_merge_type:
                embed_std = 1 / torch.sqrt(torch.tensor(self.config.hidden_size, dtype=self.dtype))
                self.image_newline = nn.Parameter(torch.randn(self.config.hidden_size, dtype=self.dtype) * embed_std)
            if "nopad" in getattr(self.config, "mm_patch_merge_type", "") and getattr(self.config, "mm_newline_position", "nothing") != "nothing":
                embed_std = 1 / torch.sqrt(torch.tensor(self.config.hidden_size, dtype=self.dtype))
                self.frame_newline = nn.Parameter(torch.randn(self.config.hidden_size, dtype=self.dtype) * embed_std)
        else:
            # In case it is frozen by LoRA
            for p in self.mm_projector.parameters():
                p.requires_grad = True

        if pretrain_mm_mlp_adapter is not None:
            mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location="cpu")

            def get_w(weights, keyword):
                return {k.split(keyword + ".")[1]: v for k, v in weights.items() if keyword in k}

            if self.config.mm_projector_type == 'lxh_qformer':
                incompatible_keys = self.mm_projector.load_state_dict(get_w(mm_projector_weights, "mm_projector"), strict=False)
            else:
                incompatible_keys = self.mm_projector.load_state_dict(get_w(mm_projector_weights, "mm_projector"))
            print(f"Loaded mm projector weights from {pretrain_mm_mlp_adapter}. Incompatible keys: {incompatible_keys}")


class LlavaMetaForCausalLM(ABC):

    @abstractmethod
    def get_model(self):
        pass

    def get_vision_tower(self):
        return self.get_model().get_vision_tower()

    def encode_video_image(self, images_list, video_idx_in_batch):
        # after video-encoder encoding, features are processed by the image-style connector
        bs = len(images_list)

        concat_images = []
        concat_videos = []
        for idx, image in enumerate(images_list):
            if idx in video_idx_in_batch:
                concat_videos.append(image)
            else:
                concat_images.append(image)
        # print(concat_videos[0].shape)
        has_image = len(concat_images) > 0
        has_video = len(concat_videos) > 0

        mm_local_num_frames = getattr(self.config, "mm_local_num_frames", -1)
        assert mm_local_num_frames != -1
        if has_image:
            image_split_sizes = [image.shape[0] for image in concat_images]
            concat_images = torch.cat([image.unsqueeze(1) for image in concat_images], dim=0)
            # print("input vit image.shape:", concat_images.shape)
            images_features = self.get_model().get_vision_tower()(concat_images)  # B_i, N, D
            images_features = torch.split(images_features, image_split_sizes)

        if has_video:
            video_split_sizes = [video.shape[0] // mm_local_num_frames for video in concat_videos]
            concat_videos = torch.cat([video.reshape(video.shape[0] // mm_local_num_frames, mm_local_num_frames, video.shape[1], video.shape[2], video.shape[3]) for video in concat_videos], dim=0)
            # print("input vit video.shape:", concat_videos.shape)
            videos_features = self.get_model().get_vision_tower()(concat_videos)  # B_v, N, D
            videos_features = [v.reshape(-1, v.shape[-2] // mm_local_num_frames, v.shape[-1]) for v in torch.split(videos_features, video_split_sizes)]

        all_videos_or_images_features = []
        img_idx = 0
        vid_idx = 0

        for idx in range(bs):

            if idx in video_idx_in_batch:
                feat = self.get_model().mm_projector(videos_features[vid_idx], compress=True, local_num_frames=getattr(self.config, "mm_local_num_frames", -1))

                vid_idx += 1
            else:
                feat = self.get_model().mm_projector(images_features[img_idx], compress=False)
                img_idx += 1
            # print("video_idx_in_batch:", video_idx_in_batch)
            all_videos_or_images_features.append(feat)

        if has_video:
            assert vid_idx == len(videos_features), f"vid: {vid_idx} != {len(videos_features)}"
        if has_image:
            assert img_idx == len(images_features), f"img: {img_idx} != {len(images_features)}"

        return all_videos_or_images_features

    def prepare_inputs_labels_for_multimodal(self, input_ids, position_ids, attention_mask, past_key_values, labels, images, modalities=["image"], image_sizes=None):
        assert type(modalities) is list, modalities

        vision_tower = self.get_vision_tower()
        # rank_print(modalities)
        if vision_tower is None or images is None or input_ids.shape[1] == 1:
            return input_ids, position_ids, attention_mask, past_key_values, None, labels

        if type(images) is list or images.ndim == 5:
            if type(images) is list:
                images = [x.unsqueeze(0) if x.ndim == 3 else x for x in images]

            video_idx_in_batch = []
            for _ in range(len(modalities)):
                if modalities[_] == "video":
                    video_idx_in_batch.append(_)

            images_list = []
            for image in images:
                if image.ndim == 4:
                    images_list.append(image)
                else:
                    images_list.append(image.unsqueeze(0))

            vision_encode_type = getattr(self.config, "vision_encode_type", "image")
            mm_patch_merge_type = getattr(self.config, "mm_patch_merge_type", "flat")
            image_aspect_ratio = getattr(self.config, "image_aspect_ratio", "square")
            frame_aspect_ratio = getattr(self.config, "frame_aspect_ratio", "square")
            mm_newline_position = getattr(self.config, "mm_newline_position", "nothing")

            if vision_encode_type == "video_image":  # video backbone, process video with compress
                image_features = self.encode_video_image(images_list, video_idx_in_batch=video_idx_in_batch)
            else:
                raise NotImplementedError(vision_encode_type)

            if mm_patch_merge_type == "flat":
                image_features = [x.flatten(0, 1) for x in image_features]
            elif mm_patch_merge_type.startswith("spatial"):
                new_image_features = []
                for image_idx, image_feature in enumerate(image_features):

                    if image_idx in video_idx_in_batch:  # video operations

                        if "anyres" in frame_aspect_ratio:
                            raise NotImplementedError
                        else:
                            frame_feature = image_feature

                        if "pad" in mm_patch_merge_type:
                            if mm_newline_position == 'one_token':
                                frame_feature = frame_feature.flatten(0, 1)
                                if "unpad" in mm_patch_merge_type:
                                    frame_feature = torch.cat((frame_feature, self.model.image_newline[None].to(frame_feature.device)), dim=0)
                                else:
                                    frame_feature = torch.cat((frame_feature, self.model.frame_newline[None].to(frame_feature.device)), dim=0)
                            elif mm_newline_position == 'nothing':
                                frame_feature = frame_feature.flatten(0, 1)
                            else:
                                raise NotImplementedError("add pad please!!")
                        else:
                            frame_feature = frame_feature.flatten(0, 1)

                        # print(f"final video frame_feature.shape: {frame_feature.shape}")
                        image_feature = frame_feature

                    elif image_feature.shape[0] > 1:  # multi patches and multi images operations
                        base_image_feature = image_feature[0]
                        image_feature = image_feature[1:]
                        origin_size = image_feature.shape

                        height = width = self.get_model().mm_projector.num_image_patches_per_side
                        assert height * width == base_image_feature.shape[0], f"height:{height}, width: {width}, base_image_feature: {base_image_feature.shape}"

                        if "anyres_max" in image_aspect_ratio:
                            matched_anyres_max_num_patches = re.match(r"anyres_max_(\d+)", image_aspect_ratio)
                            if matched_anyres_max_num_patches:
                                max_num_patches = int(matched_anyres_max_num_patches.group(1))

                        if "anyres" in image_aspect_ratio:
                            if hasattr(self.get_vision_tower(), "image_size"):
                                vision_tower_image_size = self.get_vision_tower().image_size
                            else:
                                raise ValueError("vision_tower_image_size is not found in the vision tower.")
                            try:
                                num_patch_width, num_patch_height = get_anyres_image_grid_shape(image_sizes[image_idx], self.config.image_grid_pinpoints, vision_tower_image_size, max_resolutions=None)
                            except Exception as e:
                                print(f"Error: {e}")
                                raise e
                            # num_patch_width, num_patch_height = 2, 2

                            image_feature = image_feature.view(num_patch_height, num_patch_width, height, width, -1)
                        else:
                            raise NotImplementedError(image_aspect_ratio)
                            image_feature = image_feature.view(2, 2, height, width, -1)

                        if "maxpool2x2" in mm_patch_merge_type:
                            raise NotImplementedError
                        elif "unpad" in mm_patch_merge_type and "anyres_max" in image_aspect_ratio and matched_anyres_max_num_patches:
                            raise NotImplementedError
                        elif "unpad" in mm_patch_merge_type:
                            raise NotImplementedError
                        else:
                            image_feature = image_feature.permute(0, 2, 1, 3, 4).contiguous()
                            image_feature = image_feature.flatten(0, 3)
                        if "nobase" in mm_patch_merge_type:
                            pass
                        else:
                            try:
                                image_feature = torch.cat((base_image_feature, image_feature), dim=0)
                            except Exception as e:
                                raise ValueError(f"{num_patch_width} {num_patch_height} now: base_image_feature: {base_image_feature.shape}, {image_feature.shape}, image_sizes[image_idx]: {image_sizes[image_idx]}, origin_size: {origin_size}, {image_sizes[image_idx]}, {self.config.image_grid_pinpoints}, {vision_tower_image_size}")
                    else:  # single image operations
                        image_feature = image_feature[0]
                        if "unpad" in mm_patch_merge_type:
                            image_feature = torch.cat((image_feature, self.model.image_newline[None]), dim=0)

                    # print(f"image/video_feature.shape: {image_feature.shape}")
                    new_image_features.append(image_feature)
                image_features = new_image_features
            else:
                raise ValueError(f"Unexpected mm_patch_merge_type: {self.config.mm_patch_merge_type}")
        else:
            # raise NotImplementedError(f"images.shape={images.shape}, modalities={modalities}")
            image_features = self.encode_image(images)

        # TODO: image start / end is not implemented here to support pretraining.
        if getattr(self.config, "tune_mm_mlp_adapter", False) and getattr(self.config, "mm_use_im_start_end", False):
            raise NotImplementedError
        # print(f"Total images len(image_features: {len(image_features)}")

        # Let's just add dummy tensors if they do not exist,
        # it is a headache to deal with None all the time.
        # But it is not ideal, and if you have a better idea,
        # please open an issue / submit a PR, thanks.
        _labels = labels
        _position_ids = position_ids
        _attention_mask = attention_mask
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids, dtype=torch.bool)
        else:
            attention_mask = attention_mask.bool()
        if position_ids is None:
            position_ids = torch.arange(0, input_ids.shape[1], dtype=torch.long, device=input_ids.device)
        if labels is None:
            labels = torch.full_like(input_ids, IGNORE_INDEX)

        input_ids = [cur_input_ids[cur_attention_mask] for cur_input_ids, cur_attention_mask in zip(input_ids, attention_mask)]
        labels = [cur_labels[cur_attention_mask] for cur_labels, cur_attention_mask in zip(labels, attention_mask)]

        new_input_embeds = []
        new_labels = []
        cur_image_idx = 0

        mm_llm_compress = getattr(self.config, "mm_llm_compress", False)

        if mm_llm_compress:
            self.model.llm_compress_type = getattr(self.config, "llm_compress_type", "attention")
            self.model.llm_compress_layer_list = getattr(self.config, "llm_compress_layer_list", [8, 16, 24])
            self.model.llm_image_token_ratio_list = getattr(self.config, "llm_image_token_ratio_list", [1.0, 0.5, 0.25, 0.125])
            first_image_token_position = []
            text_prompt_lens = []
        else:
            self.model.llm_compress_type = "attention"
            self.model.llm_compress_layer_list = []
            self.model.llm_image_token_ratio_list = []
            first_image_token_position = []
            text_prompt_lens = []

        # rank_print("Inserting Images embedding")
        for batch_idx, cur_input_ids in enumerate(input_ids):
            num_images = (cur_input_ids == IMAGE_TOKEN_INDEX).sum()

            if mm_llm_compress:
                ####### copy from pdrop, only support single image/video NOTE ##################
                # record image position for further dropping
                image_index = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0].tolist()
                assert len(image_index) == 1, f"Only support single image/video: {image_index}"
                if image_index == []:
                    first_image_token_position.append(-1)
                else:
                    first_image_token_position.append(image_index[0])

                # record input instruction length in inference mode
                if not self.training:
                    if image_index == []:
                        assert num_images == 0, num_images
                    else:
                        assert num_images == 1, f"num_images={num_images}"
                    text_prompt_lens.append(cur_input_ids.shape[0] - num_images)  # consider image place holder

                ###############################################

            # print(f"num_images={num_images}")
            if num_images == 0:
                cur_image_features = image_features[cur_image_idx]
                cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids)
                cur_input_embeds = torch.cat([cur_input_embeds_1, cur_image_features[0:0]], dim=0)
                new_input_embeds.append(cur_input_embeds)
                new_labels.append(labels[batch_idx])
                cur_image_idx += 1
                continue

            image_token_indices = [-1] + torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0].tolist() + [cur_input_ids.shape[0]]
            cur_input_ids_noim = []
            cur_labels = labels[batch_idx]
            cur_labels_noim = []
            for i in range(len(image_token_indices) - 1):
                cur_input_ids_noim.append(cur_input_ids[image_token_indices[i] + 1 : image_token_indices[i + 1]])
                cur_labels_noim.append(cur_labels[image_token_indices[i] + 1 : image_token_indices[i + 1]])
            split_sizes = [x.shape[0] for x in cur_labels_noim]
            cur_input_embeds = self.get_model().embed_tokens(torch.cat(cur_input_ids_noim))
            cur_input_embeds_no_im = torch.split(cur_input_embeds, split_sizes, dim=0)
            cur_new_input_embeds = []
            cur_new_labels = []

            for i in range(num_images + 1):
                cur_new_input_embeds.append(cur_input_embeds_no_im[i])
                cur_new_labels.append(cur_labels_noim[i])
                if i < num_images:
                    try:
                        cur_image_features = image_features[cur_image_idx]
                    except IndexError:
                        print(f"cur_image_idx={cur_image_idx} is not ok")
                        cur_image_features = image_features[cur_image_idx - 1]
                    cur_image_idx += 1
                    cur_new_input_embeds.append(cur_image_features)
                    cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=cur_labels.device, dtype=cur_labels.dtype))

            cur_new_input_embeds = [x.to(self.device) for x in cur_new_input_embeds]

            # import pdb; pdb.set_trace()
            cur_new_input_embeds = torch.cat(cur_new_input_embeds)
            cur_new_labels = torch.cat(cur_new_labels)

            new_input_embeds.append(cur_new_input_embeds)
            new_labels.append(cur_new_labels)

        if mm_llm_compress:
            self.model.first_image_token_position = first_image_token_position
            self.model.text_prompt_lens = text_prompt_lens
            self.model.num_image_token_lens = [image_feature.shape[0] for image_feature in image_features]

        # Truncate sequences to max length as image embeddings can make the sequence longer
        tokenizer_model_max_length = getattr(self.config, "tokenizer_model_max_length", None)
        # rank_print("Finishing Inserting")

        new_input_embeds = [x[:tokenizer_model_max_length] for x, modality in zip(new_input_embeds, modalities)]
        new_labels = [x[:tokenizer_model_max_length] for x, modality in zip(new_labels, modalities)]

        # Combine them
        max_len = max(x.shape[0] for x in new_input_embeds)
        batch_size = len(new_input_embeds)

        new_input_embeds_padded = []
        new_labels_padded = torch.full((batch_size, max_len), IGNORE_INDEX, dtype=new_labels[0].dtype, device=new_labels[0].device)
        attention_mask = torch.zeros((batch_size, max_len), dtype=attention_mask.dtype, device=attention_mask.device)
        position_ids = torch.zeros((batch_size, max_len), dtype=position_ids.dtype, device=position_ids.device)
        # print("Prepare pos id")

        for i, (cur_new_embed, cur_new_labels) in enumerate(zip(new_input_embeds, new_labels)):
            cur_len = cur_new_embed.shape[0]
            if getattr(self.config, "tokenizer_padding_side", "right") == "left":
                new_input_embeds_padded.append(torch.cat((torch.zeros((max_len - cur_len, cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device), cur_new_embed), dim=0))
                if cur_len > 0:
                    new_labels_padded[i, -cur_len:] = cur_new_labels
                    attention_mask[i, -cur_len:] = True
                    position_ids[i, -cur_len:] = torch.arange(0, cur_len, dtype=position_ids.dtype, device=position_ids.device)
            else:
                new_input_embeds_padded.append(torch.cat((cur_new_embed, torch.zeros((max_len - cur_len, cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0))
                if cur_len > 0:
                    new_labels_padded[i, :cur_len] = cur_new_labels
                    attention_mask[i, :cur_len] = True
                    position_ids[i, :cur_len] = torch.arange(0, cur_len, dtype=position_ids.dtype, device=position_ids.device)

        new_input_embeds = torch.stack(new_input_embeds_padded, dim=0)
        # print("tokenizer padding")

        if _labels is None:
            new_labels = None
        else:
            new_labels = new_labels_padded

        if _attention_mask is None:
            attention_mask = None
        else:
            attention_mask = attention_mask.to(dtype=_attention_mask.dtype)

        if _position_ids is None:
            position_ids = None
        if getattr(self.config, "use_pos_skipping", False) and self.training:
            position_ids = torch.arange(new_input_embeds.size(1), device=new_input_embeds.device).unsqueeze(0).to(new_input_embeds.device)
            split_position = random.randint(0, new_input_embeds.size(1))
            left_add = random.randint(0, self.config.pos_skipping_range)
            right_add = random.randint(left_add, self.config.pos_skipping_range)
            position_ids[:, :split_position] += left_add
            position_ids[:, split_position:] += right_add
        # import pdb; pdb.set_trace()
        # print("Finish preparing")
        return None, position_ids, attention_mask, past_key_values, new_input_embeds, new_labels

    def initialize_vision_tokenizer(self, model_args, tokenizer):
        if model_args.mm_use_im_patch_token:
            tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
            self.resize_token_embeddings(len(tokenizer))

        if model_args.mm_use_im_start_end:
            num_new_tokens = tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)
            self.resize_token_embeddings(len(tokenizer))

            if num_new_tokens > 0:
                input_embeddings = self.get_input_embeddings().weight.data
                output_embeddings = self.get_output_embeddings().weight.data

                input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)
                output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)

                input_embeddings[-num_new_tokens:] = input_embeddings_avg
                output_embeddings[-num_new_tokens:] = output_embeddings_avg

            if model_args.tune_mm_mlp_adapter:
                for p in self.get_input_embeddings().parameters():
                    p.requires_grad = True
                for p in self.get_output_embeddings().parameters():
                    p.requires_grad = False

            if model_args.pretrain_mm_mlp_adapter:
                mm_projector_weights = torch.load(model_args.pretrain_mm_mlp_adapter, map_location="cpu")
                embed_tokens_weight = mm_projector_weights["model.embed_tokens.weight"]
                assert num_new_tokens == 2
                if input_embeddings.shape == embed_tokens_weight.shape:
                    input_embeddings[-num_new_tokens:] = embed_tokens_weight[-num_new_tokens:]
                elif embed_tokens_weight.shape[0] == num_new_tokens:
                    input_embeddings[-num_new_tokens:] = embed_tokens_weight
                else:
                    raise ValueError(f"Unexpected embed_tokens_weight shape. Pretrained: {embed_tokens_weight.shape}. Current: {input_embeddings.shape}. Number of new tokens: {num_new_tokens}.")
        elif model_args.mm_use_im_patch_token:
            if model_args.tune_mm_mlp_adapter:
                for p in self.get_input_embeddings().parameters():
                    p.requires_grad = False
                for p in self.get_output_embeddings().parameters():
                    p.requires_grad = False


class VideoChatFlashQwenConfig(Qwen2Config):
    model_type = "videochat_flash_qwen"


class VideoChatFlashQwenModel(LlavaMetaModel, Qwen2Model_Flash):
    config_class = VideoChatFlashQwenConfig

    def __init__(self, config: VideoChatFlashQwenConfig):
        super(VideoChatFlashQwenModel, self).__init__(config)


class VideoChatFlashQwenForCausalLM(LlavaMetaForCausalLM, Qwen2ForCausalLM_Flash):
    config_class = VideoChatFlashQwenConfig

    def __init__(self, config):
        # super(Qwen2ForCausalLM, self).__init__(config)
        Qwen2ForCausalLM_Flash.__init__(self, config)
        config.model_type = "videochat_flash_qwen"
        # config.rope_scaling = None

        self.model = VideoChatFlashQwenModel(config)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        # Initialize weights and apply final processing
        self.post_init()

    def get_model(self):
        return self.model

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        images: Optional[torch.FloatTensor] = None,
        image_sizes: Optional[List[List[int]]] = None,
        return_dict: Optional[bool] = None,
        modalities: Optional[List[str]] = ["image"],
        dpo_forward: Optional[bool] = False,
        cache_position=None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:

        if inputs_embeds is None:
            (input_ids, position_ids, attention_mask, past_key_values, inputs_embeds, labels) = self.prepare_inputs_labels_for_multimodal(input_ids, position_ids, attention_mask, past_key_values, labels, images, modalities, image_sizes)

        # print("inputs_embeds.shape:", inputs_embeds.shape)
        if dpo_forward:
            raise NotImplementedError
        else:
            return super().forward(
                input_ids=input_ids,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                inputs_embeds=inputs_embeds,
                labels=labels,
                use_cache=use_cache,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )

    @torch.no_grad()
    def generate(
        self,
        inputs: Optional[torch.Tensor] = None,
        images: Optional[torch.Tensor] = None,
        image_sizes: Optional[torch.Tensor] = None,
        modalities: Optional[List[str]] = ["image"],
        **kwargs,
    ) -> Union[GenerateOutput, torch.LongTensor]:
        position_ids = kwargs.pop("position_ids", None)
        attention_mask = kwargs.pop("attention_mask", None)
        if "inputs_embeds" in kwargs:
            raise NotImplementedError("`inputs_embeds` is not supported")

        if images is not None:
            (inputs, position_ids, attention_mask, _, inputs_embeds, _) = self.prepare_inputs_labels_for_multimodal(inputs, position_ids, attention_mask, None, None, images, modalities, image_sizes=image_sizes)
        else:
            self.model.image_token_posi = [-1]
            self.model.prompt_len = None
            self.model.image_tokens = [0]
            inputs_embeds = self.get_model().embed_tokens(inputs)

        return super().generate(position_ids=position_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, **kwargs)

    @torch.no_grad()
    def chat(self,
             video_path,
             tokenizer,
             user_prompt,
             chat_history=None,
             return_history=True,
             max_num_frames=512,
             media_dict=None,
             generation_config={}):

        frames, time_msg = load_video(video_path, max_num_frames=max_num_frames, media_dict=media_dict)

        image_sizes = [frames[0].shape[:2]]

        frames = [self.get_vision_tower().image_processor.preprocess(frames, return_tensors="pt")["pixel_values"].half().cuda()]

        conv = conv_templates["qwen_2"].copy()

        if chat_history is None or len(chat_history) == 0:
            user_prompt = f'{DEFAULT_IMAGE_TOKEN}\n{time_msg.rstrip()} {user_prompt}'
        else:
            assert DEFAULT_IMAGE_TOKEN in chat_history[0]['content'], chat_history
            for msg in chat_history:
                conv.append_message(msg['role'], msg['content'])

        conv.append_message(conv.roles[0], user_prompt)
        conv.append_message(conv.roles[1], None)

        prompt = conv.get_prompt()

        input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt").unsqueeze(0).cuda()

        if tokenizer.pad_token_id is None:
            if "qwen" in tokenizer.name_or_path.lower():
                print("Setting pad token to bos token for qwen model.")
                tokenizer.pad_token_id = 151643

        attention_masks = input_ids.ne(tokenizer.pad_token_id).long().cuda()

        stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
        keywords = [stop_str]
        stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids)

        with torch.inference_mode():
            output_ids = self.generate(
                inputs=input_ids,
                images=frames,
                attention_mask=attention_masks,
                modalities=["video"],
                image_sizes=image_sizes,
                use_cache=True,
                stopping_criteria=[stopping_criteria],
                **generation_config
            )

        outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()
        if outputs.endswith(stop_str):
            outputs = outputs[: -len(stop_str)]

        outputs = outputs.strip()

        # print(f"\033[91m== Question: \033[0m\n{prompt}\n")
        # print(f"\033[91m== Response: \033[0m\n{outputs}\n")

        if chat_history is None:
            chat_history = []

        chat_history.append({"role": conv.roles[0], "content": user_prompt})
        chat_history.append({"role": conv.roles[1], "content": outputs})
        if return_history:
            return outputs, chat_history
        else:
            return outputs

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
        images = kwargs.pop("images", None)
        image_sizes = kwargs.pop("image_sizes", None)
        inputs = super().prepare_inputs_for_generation(input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, **kwargs)
        if images is not None:
            inputs["images"] = images
        if image_sizes is not None:
            inputs["image_sizes"] = image_sizes
        return inputs


AutoConfig.register("videochat_flash_qwen", VideoChatFlashQwenConfig)
AutoModelForCausalLM.register(VideoChatFlashQwenConfig, VideoChatFlashQwenForCausalLM)
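The `chat()` method above is the intended entry point for video question answering: it loads frames, builds the `qwen_2` conversation prompt, and calls `generate()`. A minimal usage sketch, assuming the checkpoint's config maps to the classes above via `trust_remote_code=True`; the repo id, video path, and generation settings are placeholders, and a GPU with flash-attention support is assumed:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholders: adjust the repo id and video path to your setup.
model_path = "OpenGVLab/VideoChat-Flash-Qwen2_5-2B_res448"
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True).half().cuda()

# chat() samples up to max_num_frames frames and returns the answer (and history).
output, history = model.chat(
    "example_video.mp4", tokenizer, "Describe what happens in the video.",
    chat_history=None, return_history=True, max_num_frames=512,
    generation_config={"do_sample": False, "max_new_tokens": 256},
)
print(output)
```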
special_tokens_map.json
ADDED
@@ -0,0 +1,31 @@
{
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>",
    "<|object_ref_start|>",
    "<|object_ref_end|>",
    "<|box_start|>",
    "<|box_end|>",
    "<|quad_start|>",
    "<|quad_end|>",
    "<|vision_start|>",
    "<|vision_end|>",
    "<|vision_pad|>",
    "<|image_pad|>",
    "<|video_pad|>"
  ],
  "eos_token": {
    "content": "<|im_end|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
tokenizer_config.json
ADDED
@@ -0,0 +1,208 @@
{
  "add_bos_token": false,
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "151643": {"content": "<|endoftext|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151644": {"content": "<|im_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151645": {"content": "<|im_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151646": {"content": "<|object_ref_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151647": {"content": "<|object_ref_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151648": {"content": "<|box_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151649": {"content": "<|box_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151650": {"content": "<|quad_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151651": {"content": "<|quad_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151652": {"content": "<|vision_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151653": {"content": "<|vision_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151654": {"content": "<|vision_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151655": {"content": "<|image_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151656": {"content": "<|video_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151657": {"content": "<tool_call>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151658": {"content": "</tool_call>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151659": {"content": "<|fim_prefix|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151660": {"content": "<|fim_middle|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151661": {"content": "<|fim_suffix|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151662": {"content": "<|fim_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151663": {"content": "<|repo_name|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151664": {"content": "<|file_sep|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false}
  },
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>",
    "<|object_ref_start|>",
    "<|object_ref_end|>",
    "<|box_start|>",
    "<|box_end|>",
    "<|quad_start|>",
    "<|quad_end|>",
    "<|vision_start|>",
    "<|vision_end|>",
    "<|vision_pad|>",
    "<|image_pad|>",
    "<|video_pad|>"
  ],
  "bos_token": null,
  "chat_template": "{%- if tools %}\n    {{- '<|im_start|>system\\n' }}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- messages[0]['content'] }}\n    {%- else %}\n        {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n    {%- endif %}\n    {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n    {%- for tool in tools %}\n        {{- \"\\n\" }}\n        {{- tool | tojson }}\n    {%- endfor %}\n    {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n    {%- else %}\n        {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n    {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n    {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n        {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n    {%- elif message.role == \"assistant\" %}\n        {{- '<|im_start|>' + message.role }}\n        {%- if message.content %}\n            {{- '\\n' + message.content }}\n        {%- endif %}\n        {%- for tool_call in message.tool_calls %}\n            {%- if tool_call.function is defined %}\n                {%- set tool_call = tool_call.function %}\n            {%- endif %}\n            {{- '\\n<tool_call>\\n{\"name\": \"' }}\n            {{- tool_call.name }}\n            {{- '\", \"arguments\": ' }}\n            {{- tool_call.arguments | tojson }}\n            {{- '}\\n</tool_call>' }}\n        {%- endfor %}\n        {{- '<|im_end|>\\n' }}\n    {%- elif message.role == \"tool\" %}\n        {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n            {{- '<|im_start|>user' }}\n        {%- endif %}\n        {{- '\\n<tool_response>\\n' }}\n        {{- message.content }}\n        {{- '\\n</tool_response>' }}\n        {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n            {{- '<|im_end|>\\n' }}\n        {%- endif %}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|im_end|>",
  "errors": "replace",
  "model_max_length": 32768,
  "pad_token": "<|endoftext|>",
  "padding_side": "right",
  "split_special_tokens": false,
  "tokenizer_class": "Qwen2Tokenizer",
  "unk_token": null
}
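The `chat_template` field above defines the ChatML-style prompt format the tokenizer renders. A small sketch of how it is consumed through the standard `transformers` API (the repo id is a placeholder, shown only for illustration):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("OpenGVLab/VideoChat-Flash-Qwen2_5-2B_res448")
messages = [{"role": "user", "content": "Hello!"}]
# Produces the ChatML-style prompt: default system turn, the user turn wrapped in
# <|im_start|>/<|im_end|>, and a trailing "<|im_start|>assistant\n" for generation.
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
```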
trainer_state.json
ADDED
The diff for this file is too large to render.
See raw diff
training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b56ec34c18c9bbd191a2faf6892f9eb4c856cdec5d8b0fa05f54ad8da5d942f2
size 7480
vision_tower_builder.py
ADDED
@@ -0,0 +1,622 @@
1 |
+
from typing import Optional, Tuple, Union, Dict
|
2 |
+
from dataclasses import dataclass
|
3 |
+
from functools import partial, reduce
|
4 |
+
from PIL import Image
|
5 |
+
import torch
|
6 |
+
import torch.utils.checkpoint
|
7 |
+
from torch import nn
|
8 |
+
import os
|
9 |
+
from transformers.image_processing_utils import BatchFeature, get_size_dict
|
10 |
+
from transformers.image_transforms import (
|
11 |
+
convert_to_rgb,
|
12 |
+
normalize,
|
13 |
+
rescale,
|
14 |
+
resize,
|
15 |
+
to_channel_dimension_format,
|
16 |
+
)
|
17 |
+
from transformers.image_utils import (
|
18 |
+
ChannelDimension,
|
19 |
+
PILImageResampling,
|
20 |
+
to_numpy_array,
|
21 |
+
)
|
22 |
+
import numpy as np
|
23 |
+
import torch
|
24 |
+
import torch.nn as nn
|
25 |
+
import torch.nn.functional as F
|
26 |
+
import torch.utils.checkpoint as checkpoint
|
27 |
+
from functools import partial
|
28 |
+
try:
|
29 |
+
from flash_attn import flash_attn_qkvpacked_func
|
30 |
+
except:
|
31 |
+
print("You need to install flash_attn")
|
32 |
+
from timm.models.layers import drop_path, to_2tuple, trunc_normal_
|
33 |
+
|
34 |
+
|
35 |
+
|
36 |
+
class DropPath(nn.Module):
|
37 |
+
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
|
38 |
+
"""
|
39 |
+
def __init__(self, drop_prob=None):
|
40 |
+
super(DropPath, self).__init__()
|
41 |
+
self.drop_prob = drop_prob
|
42 |
+
|
43 |
+
def forward(self, x):
|
44 |
+
return drop_path(x, self.drop_prob, self.training)
|
45 |
+
|
46 |
+
def extra_repr(self) -> str:
|
47 |
+
return 'p={}'.format(self.drop_prob)
|
48 |
+
|
49 |
+
|
50 |
+
class Mlp(nn.Module):
|
51 |
+
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
|
52 |
+
super().__init__()
|
53 |
+
out_features = out_features or in_features
|
54 |
+
hidden_features = hidden_features or in_features
|
55 |
+
self.fc1 = nn.Linear(in_features, hidden_features)
|
56 |
+
self.act = act_layer()
|
57 |
+
self.fc2 = nn.Linear(hidden_features, out_features)
|
58 |
+
self.drop = nn.Dropout(drop)
|
59 |
+
|
60 |
+
def forward(self, x):
|
61 |
+
x = self.fc1(x)
|
62 |
+
x = self.act(x)
|
63 |
+
x = self.drop(x)
|
64 |
+
x = self.fc2(x)
|
65 |
+
x = self.drop(x)
|
66 |
+
return x
|
67 |
+
|
68 |
+
class Attention(nn.Module):
|
69 |
+
def __init__(
|
70 |
+
self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.,
|
71 |
+
proj_drop=0., attn_head_dim=None,
|
72 |
+
attn_type='flash_v2'):
|
73 |
+
super().__init__()
|
74 |
+
self.num_heads = num_heads
|
75 |
+
head_dim = dim // num_heads
|
76 |
+
if attn_head_dim is not None:
|
77 |
+
head_dim = attn_head_dim
|
78 |
+
all_head_dim = head_dim * self.num_heads
|
79 |
+
self.scale = qk_scale or head_dim ** -0.5
|
80 |
+
|
81 |
+
self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False)
|
82 |
+
if qkv_bias:
|
83 |
+
self.q_bias = nn.Parameter(torch.zeros(all_head_dim))
|
84 |
+
self.v_bias = nn.Parameter(torch.zeros(all_head_dim))
|
85 |
+
else:
|
86 |
+
self.q_bias = None
|
87 |
+
self.v_bias = None
|
88 |
+
|
89 |
+
if attn_type not in ['origin', 'flash_v2']:
|
90 |
+
raise NotImplementedError(f"Not support attn_type: {attn_type}")
|
91 |
+
|
92 |
+
# print('umt:', f'attn_type: {attn_type}')
|
93 |
+
|
94 |
+
self.attn_type = attn_type
|
95 |
+
if attn_type == 'flash_v2':
|
96 |
+
self.attn_drop = attn_drop
|
97 |
+
else:
|
98 |
+
self.attn_drop = nn.Dropout(attn_drop)
|
99 |
+
self.proj = nn.Linear(all_head_dim, dim)
|
100 |
+
self.proj_drop = nn.Dropout(proj_drop)
|
101 |
+
|
102 |
+
def forward(self, x):
|
103 |
+
B, N, C = x.shape
|
104 |
+
qkv_bias = None
|
105 |
+
if self.q_bias is not None:
|
106 |
+
qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias))
|
107 |
+
# qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
|
108 |
+
qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
|
109 |
+
|
110 |
+
if self.attn_type == 'flash_v2':
|
111 |
+
qkv = qkv.reshape(B, N, 3, self.num_heads, -1)
|
112 |
+
x = flash_attn_qkvpacked_func(qkv, dropout_p=self.attn_drop, softmax_scale=self.scale, causal=False).reshape(B, N, -1)
|
113 |
+
else:
|
114 |
+
qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
|
115 |
+
q, k, v = qkv[0], qkv[1], qkv[
|
116 |
+
2] # make torchscript happy (cannot use tensor as tuple)
|
117 |
+
# B num_heads N head_dim
|
118 |
+
|
119 |
+
q = q * self.scale
|
120 |
+
attn = (q @ k.transpose(-2, -1))
|
121 |
+
|
122 |
+
attn = attn.softmax(dim=-1)
|
123 |
+
attn = self.attn_drop(attn)
|
124 |
+
|
125 |
+
x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
|
126 |
+
|
127 |
+
x = self.proj(x)
|
128 |
+
x = self.proj_drop(x)
|
129 |
+
return x
|
130 |
+
|
131 |
+
|
132 |
+
|
133 |
+
|
134 |
+
class Block(nn.Module):
|
135 |
+
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
|
136 |
+
drop_path=0., init_values=None, act_layer=nn.GELU, norm_layer=nn.LayerNorm,
|
137 |
+
attn_head_dim=None):
|
138 |
+
super().__init__()
|
139 |
+
self.norm1 = norm_layer(dim)
|
140 |
+
self.attn = Attention(
|
141 |
+
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
|
142 |
+
attn_drop=attn_drop, proj_drop=drop, attn_head_dim=attn_head_dim)
|
143 |
+
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
|
144 |
+
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
|
145 |
+
self.norm2 = norm_layer(dim)
|
146 |
+
mlp_hidden_dim = int(dim * mlp_ratio)
|
147 |
+
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
|
148 |
+
|
149 |
+
if init_values > 0:
|
150 |
+
self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)),requires_grad=True)
|
151 |
+
self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)),requires_grad=True)
|
152 |
+
else:
|
153 |
+
self.gamma_1, self.gamma_2 = None, None
|
154 |
+
|
155 |
+
def forward(self, x):
|
156 |
+
if self.gamma_1 is None:
|
157 |
+
x = x + self.drop_path(self.attn(self.norm1(x)))
|
158 |
+
x = x + self.drop_path(self.mlp(self.norm2(x)))
|
159 |
+
else:
|
160 |
+
x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x)))
|
161 |
+
x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
|
162 |
+
return x
|
163 |
+
|
164 |
+
|
165 |
+
class PatchEmbed(nn.Module):
|
166 |
+
""" Image to Patch Embedding
|
167 |
+
"""
|
168 |
+
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, num_frames=16, tubelet_size=2):
|
169 |
+
super().__init__()
|
170 |
+
img_size = to_2tuple(img_size)
|
171 |
+
patch_size = to_2tuple(patch_size)
|
172 |
+
self.tubelet_size = int(tubelet_size)
|
173 |
+
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) * (num_frames // self.tubelet_size)
|
174 |
+
self.img_size = img_size
|
175 |
+
self.patch_size = patch_size
|
176 |
+
self.num_patches = num_patches
|
177 |
+
self.proj = nn.Conv3d(
|
178 |
+
in_channels=in_chans, out_channels=embed_dim,
|
179 |
+
kernel_size=(self.tubelet_size, patch_size[0], patch_size[1]),
|
180 |
+
stride=(self.tubelet_size, patch_size[0], patch_size[1])
|
181 |
+
)
|
182 |
+
# print('umt:', f'Num of patches: {num_patches}')
|
183 |
+
|
184 |
+
def forward(self, x, **kwargs):
|
185 |
+
B, C, T, H, W = x.shape
|
186 |
+
# FIXME look at relaxing size constraints
|
187 |
+
# assert H == self.img_size[0] and W == self.img_size[1], \
|
188 |
+
# f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
|
189 |
+
x = self.proj(x).flatten(2).transpose(1, 2)
|
190 |
+
return x
|
191 |
+
|
192 |
+
# sin-cos position encoding
# https://github.com/jadore801120/attention-is-all-you-need-pytorch/blob/master/transformer/Models.py#L31
def get_sinusoid_encoding_table(n_position, d_hid, ckpt_num_frame=-1, cur_frame=12):
    ''' Sinusoid position encoding table '''
    # TODO: make it with torch instead of numpy
    def get_position_angle_vec(position):
        return [position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid)]

    if ckpt_num_frame != -1 and ckpt_num_frame != cur_frame:
        # print('umt:', f"Interpolate position embedding")
        # print('umt:', f"Testing frame: {cur_frame}")
        # print('umt:', f"Checkpoint frame: {ckpt_num_frame}")

        T = ckpt_num_frame  # checkpoint frame
        new_T = cur_frame  # testing frame
        n_position = n_position // new_T * T  # generate checkpoint position embedding
        sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(n_position)])
        sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2])  # dim 2i
        sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2])  # dim 2i+1
        sinusoid_table = torch.tensor(sinusoid_table, dtype=torch.float, requires_grad=False).unsqueeze(0)
        # interpolate
        P = int((n_position // T) ** 0.5)
        C = d_hid
        sinusoid_table = sinusoid_table.reshape(-1, T, P, P, C)
        sinusoid_table = sinusoid_table.permute(0, 2, 3, 4, 1).reshape(-1, C, T)  # BHW, C, T
        sinusoid_table = torch.nn.functional.interpolate(sinusoid_table, size=new_T, mode='linear')
        sinusoid_table = sinusoid_table.reshape(1, P, P, C, new_T).permute(0, 4, 1, 2, 3)  # B, T, H, W, C
        sinusoid_table = sinusoid_table.flatten(1, 3)
        return sinusoid_table
    else:
        sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(n_position)])
        sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2])  # dim 2i
        sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2])  # dim 2i+1
        return torch.tensor(sinusoid_table, dtype=torch.float, requires_grad=False).unsqueeze(0)
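When the checkpoint and test frame counts differ, the table is first built for the checkpoint's frame count and then linearly interpolated along the temporal axis. A sketch of one call, with illustrative values (14 x 14 spatial grid, 1024-dim, 4-frame checkpoint evaluated at 8 frames):

pos = get_sinusoid_encoding_table(n_position=8 * 14 * 14, d_hid=1024, ckpt_num_frame=4, cur_frame=8)
print(pos.shape)  # torch.Size([1, 1568, 1024]): one embedding per patch over 8 frames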
def get_sinusoid_encoding_table2(n_position=784, d_hid=1024, cur_frame=8, ckpt_num_frame=4, pre_n_position=784):
    ''' Sinusoid position encoding table '''
    # TODO: make it with torch instead of numpy
    def get_position_angle_vec(position):
        return [position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid)]

    # generate checkpoint position embedding
    sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(pre_n_position)])
    sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2])  # dim 2i
    sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2])  # dim 2i+1
    sinusoid_table = torch.tensor(sinusoid_table, dtype=torch.float, requires_grad=False).unsqueeze(0)

    # print(f"n_position: {n_position}")
    # print(f"pre_n_position: {pre_n_position}")

    if n_position != pre_n_position:
        T = ckpt_num_frame  # checkpoint frame
        P = 14  # checkpoint size
        C = d_hid
        new_P = int((n_position // cur_frame) ** 0.5)  # testing size
        # print(f'Pretraining uses 14x14, but current version is {new_P}x{new_P}')
        # print(f'Interpolate the position embedding')
        sinusoid_table = sinusoid_table.reshape(-1, T, P, P, C)
        sinusoid_table = sinusoid_table.reshape(-1, P, P, C).permute(0, 3, 1, 2)
        sinusoid_table = torch.nn.functional.interpolate(
            sinusoid_table, size=(new_P, new_P), mode='bicubic', align_corners=False)
        # BT, C, H, W -> BT, H, W, C -> B, T, H, W, C
        sinusoid_table = sinusoid_table.permute(0, 2, 3, 1).reshape(-1, T, new_P, new_P, C)
        sinusoid_table = sinusoid_table.flatten(1, 3)  # B, THW, C

    if cur_frame != ckpt_num_frame:
        # print(f'Pretraining uses 4 frames, but current frame is {cur_frame}')
        # print(f'Interpolate the position embedding')
        T = ckpt_num_frame  # checkpoint frame
        new_T = cur_frame  # testing frame
        # interpolate
        P = int((n_position // cur_frame) ** 0.5)  # testing size
        C = d_hid
        sinusoid_table = sinusoid_table.reshape(-1, T, P, P, C)
        sinusoid_table = sinusoid_table.permute(0, 2, 3, 4, 1).reshape(-1, C, T)  # BHW, C, T
        sinusoid_table = torch.nn.functional.interpolate(sinusoid_table, size=new_T, mode='linear')
        sinusoid_table = sinusoid_table.reshape(1, P, P, C, new_T).permute(0, 4, 1, 2, 3)  # B, T, H, W, C
        sinusoid_table = sinusoid_table.flatten(1, 3)  # B, THW, C

    return sinusoid_table

class PretrainVisionTransformerEncoder(nn.Module):
    """ Vision Transformer with support for patch or hybrid CNN input stage
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, depth=12,
                 num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
                 drop_path_rate=0., norm_layer=nn.LayerNorm, init_values=None, num_frames=8, tubelet_size=1,
                 use_learnable_pos_emb=False,
                 use_checkpoint=False, checkpoint_num=0,
                 ckpt_num_frame=-1, with_ln=True, return_index=-1
                 ):
        super().__init__()
        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
        self.patch_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
            num_frames=num_frames, tubelet_size=tubelet_size
        )
        num_patches = self.patch_embed.num_patches
        self.depth = depth + return_index + 1
        self.use_checkpoint = use_checkpoint
        self.checkpoint_num = checkpoint_num
        # print('umt:', f"Use checkpoint: {use_checkpoint}")
        # print('umt:', f"Checkpoint number: {checkpoint_num}")
        # print('UMT:', f"Real running depth: {self.depth}")

        # TODO: Add the cls token
        if use_learnable_pos_emb:
            self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
            self.img_pos_embed = nn.Parameter(torch.zeros(1, num_patches//(num_frames//tubelet_size) + 1, embed_dim))
        else:
            # sine-cosine positional embeddings
            if img_size != 224:
                self.pos_embed = get_sinusoid_encoding_table2(num_patches, embed_dim, ckpt_num_frame=ckpt_num_frame, cur_frame=num_frames//tubelet_size)
                self.img_pos_embed = get_sinusoid_encoding_table2(num_patches//(num_frames//tubelet_size), embed_dim, cur_frame=1, ckpt_num_frame=1, pre_n_position=14*14)
            else:
                self.pos_embed = get_sinusoid_encoding_table(num_patches, embed_dim, ckpt_num_frame=ckpt_num_frame, cur_frame=num_frames//tubelet_size)
                self.img_pos_embed = get_sinusoid_encoding_table(num_patches//(num_frames//tubelet_size), embed_dim)

        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
                init_values=init_values)
            for i in range(self.depth)])

        if with_ln:
            self.vision_layernorm = nn.LayerNorm(embed_dim, eps=1e-12)
        else:
            self.vision_layernorm = nn.Identity()

        if use_learnable_pos_emb:
            trunc_normal_(self.pos_embed, std=.02)

    @torch.jit.ignore
    def no_weight_decay(self):
        return {'pos_embed', 'cls_token'}

    def forward_features(self, x, use_image=False):
        x = self.patch_embed(x)

        if use_image:
            x = x + self.img_pos_embed.type_as(x).to(x.device).clone().detach()
        else:
            x = x + self.pos_embed.type_as(x).to(x.device).clone().detach()

        B, _, C = x.shape
        x_vis = x

        for idx, blk in enumerate(self.blocks):
            if self.use_checkpoint and idx < self.checkpoint_num:
                x_vis = checkpoint.checkpoint(blk, x_vis)
            else:
                x_vis = blk(x_vis)

        # with ln or not
        x_vis = self.vision_layernorm(x_vis)
        return x_vis

    def forward(self, x, use_image=False):
        x_vis = self.forward_features(x, use_image)
        return x_vis

class PretrainVisionTransformer(nn.Module):
    """ Vision Transformer with support for patch or hybrid CNN input stage
    """
    def __init__(self,
                 img_size=224,
                 patch_size=16,
                 encoder_in_chans=3,
                 encoder_embed_dim=768,
                 encoder_depth=12,
                 encoder_num_heads=12,
                 mlp_ratio=4.,
                 qkv_bias=True,
                 qk_scale=None,
                 drop_rate=0.,
                 attn_drop_rate=0.,
                 drop_path_rate=0.,
                 norm_layer=partial(nn.LayerNorm, eps=1e-6),
                 init_values=0.,
                 use_learnable_pos_emb=False,
                 num_frames=8,
                 tubelet_size=1,
                 use_checkpoint=False,
                 checkpoint_num=0,
                 ckpt_num_frame=4,  # the pretrained model uses 4 frames
                 return_index=-1,
                 with_ln=False
                 ):
        super().__init__()

        self.encoder = PretrainVisionTransformerEncoder(
            img_size=img_size,
            patch_size=patch_size,
            in_chans=encoder_in_chans,
            embed_dim=encoder_embed_dim,
            depth=encoder_depth,
            num_heads=encoder_num_heads,
            mlp_ratio=mlp_ratio,
            qkv_bias=qkv_bias,
            qk_scale=qk_scale,
            drop_rate=drop_rate,
            attn_drop_rate=attn_drop_rate,
            drop_path_rate=drop_path_rate,
            norm_layer=norm_layer,
            init_values=init_values,
            num_frames=num_frames,
            tubelet_size=tubelet_size,
            use_learnable_pos_emb=use_learnable_pos_emb,
            use_checkpoint=use_checkpoint,
            checkpoint_num=checkpoint_num,
            ckpt_num_frame=ckpt_num_frame,
            with_ln=with_ln,
            return_index=return_index
        )
        # print('umt:', f'With LN: {with_ln}')
        # print('UMT:', f'Total {encoder_depth} layer')
        # print('UMT:', f'Return {encoder_depth+return_index+1}-th layer')

        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            nn.init.xavier_uniform_(m.weight)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        return {'pos_embed', 'cls_token', 'clip_pos_embed'}

    def forward(self, x, use_image=False):
        T = x.shape[2]
        x_vis = self.encoder(x, use_image)  # [B, N_vis, C_e]
        B, TL, C = x_vis.shape
        x_vis = x_vis.view(B, T, TL // T, C)

        return x_vis
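The wrapper reshapes the encoder output to [B, T, L, C], i.e. one token grid per frame. A rough sketch with a deliberately tiny configuration (not the released UMT-L weights), assuming the helper layers defined earlier in this file:

import torch

tiny = PretrainVisionTransformer(img_size=224, encoder_embed_dim=64, encoder_depth=2,
                                 encoder_num_heads=2, num_frames=4, tubelet_size=1)
clip = torch.randn(1, 3, 4, 224, 224)   # B, C, T, H, W
feat = tiny(clip)
print(feat.shape)                       # torch.Size([1, 4, 196, 64]): B, T, tokens per frame, C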
class UMTImageProcessor:
    def __init__(self, image_mean=(0.485, 0.456, 0.406), image_std=(0.229, 0.224, 0.225), size=(224, 224), crop_size: Dict[str, int] = None, resample=PILImageResampling.BICUBIC, rescale_factor=1 / 255, data_format=ChannelDimension.FIRST):
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.image_mean = image_mean
        self.image_std = image_std
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.data_format = data_format
        self.crop_size = crop_size

    def preprocess(self, images, return_tensors, target_size=None):
        if isinstance(images, Image.Image):
            images = [images]
        else:
            # to adapt video data
            images = [to_numpy_array(image) for image in images]
        assert isinstance(images, list)

        if target_size is None:
            target_size = self.size

        transforms = [
            convert_to_rgb,
            to_numpy_array,
            partial(resize, size=target_size, resample=self.resample, data_format=self.data_format),
            partial(rescale, scale=self.rescale_factor, data_format=self.data_format),
            partial(normalize, mean=self.image_mean, std=self.image_std, data_format=self.data_format),
            partial(to_channel_dimension_format, channel_dim=self.data_format, input_channel_dim=self.data_format),
        ]

        images = reduce(lambda x, f: [*map(f, x)], transforms, images)
        data = {"pixel_values": images}

        return BatchFeature(data=data, tensor_type=return_tensors)
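UMTImageProcessor is a thin pipeline over the transformers image transforms; frames go in as PIL images (or arrays) and come out stacked as pixel_values. A hypothetical usage sketch; the dummy frames and the 448 target size are illustrative, and the exact return layout depends on the installed transformers version:

import numpy as np
from PIL import Image

processor = UMTImageProcessor(size=(448, 448))
frames = [Image.fromarray(np.zeros((360, 640, 3), dtype=np.uint8)) for _ in range(4)]  # dummy frames
batch = processor.preprocess(frames, return_tensors="pt")
print(batch["pixel_values"].shape)  # expected: torch.Size([4, 3, 448, 448])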
class UMTVisionConfig:
    model_type = "umt_vision_model"

    def __init__(
        self,
        num_frames=4,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        num_channels=3,
        image_size=224,
        patch_size=16,
        return_idx=-2
        # **kwargs,
    ):
        # super().__init__(**kwargs)
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.return_idx = return_idx

def build_vit(config, pt_type='origin'):
    model = PretrainVisionTransformer(
        img_size=config.image_size,
        patch_size=16,
        encoder_embed_dim=1024,
        encoder_depth=24,
        encoder_num_heads=16,
        drop_path_rate=0.,
        num_frames=config.num_frames,
        tubelet_size=1,
        use_checkpoint=True,
        checkpoint_num=24,
        return_index=config.return_idx,
        with_ln=True,  # merge vision_layernorm in it
    )

    # no need to load pt

    return model
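As wired here, build_vit fixes the UMT-L geometry (24 layers, 1024-dim, 16 heads, 16-pixel patches) and only takes image_size, num_frames and the returned layer index from the config, which corresponds to the roughly 300M-parameter UMT-L encoder mentioned in the model card. An illustrative call (this allocates the full-size encoder, without pretrained weights):

cfg = UMTVisionConfig(num_frames=4, image_size=448, return_idx=-2)
vit = build_vit(cfg)
print(sum(p.numel() for p in vit.parameters()) / 1e6, "M parameters")  # on the order of 300M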

class UMTVisionTower(nn.Module):
    def __init__(self, vision_tower, vision_tower_cfg, delay_load=False, pt_type='origin', image_size=224):
        super().__init__()

        self.is_loaded = False
        self.pt_type = pt_type

        self.config = UMTVisionConfig(num_frames=vision_tower_cfg.mm_local_num_frames, return_idx=vision_tower_cfg.mm_vision_select_layer, image_size=image_size)

        self.vision_tower_name = vision_tower

        self.image_processor = UMTImageProcessor(size=(image_size, image_size))

        if not delay_load:
            print(f"Loading vision tower: {vision_tower}")
            self.load_model()
        elif getattr(vision_tower_cfg, "unfreeze_mm_vision_tower", False):
            # TODO: better detector is needed.
            print(f"The checkpoint seems to contain `vision_tower` weights: `unfreeze_mm_vision_tower`: True.")
            self.load_model()
        elif hasattr(vision_tower_cfg, "mm_tunable_parts") and "mm_vision_tower" in vision_tower_cfg.mm_tunable_parts:
            print(f"The checkpoint seems to contain `vision_tower` weights: `mm_tunable_parts` contains `mm_vision_tower`.")
            self.load_model()
        else:
            self.cfg_only = self.config

    def load_model(self, device_map=None):
        if self.is_loaded:
            print("{} is already loaded, `load_model` called again, skipping.".format(self.vision_tower_name))
            return

        self.vision_tower = build_vit(self.config, pt_type=self.pt_type)
        self.vision_tower.requires_grad_(False)

        self.is_loaded = True

    def forward(self, images):
        if type(images) is list:
            raise NotImplementedError
        else:
            # input: B T C H W
            # output: B T*L C
            T = images.shape[1]
            images = images.permute(0, 2, 1, 3, 4)
            image_embeds = self.vision_tower(images, use_image=(T == 1))
            B, T, L, C = image_embeds.shape
            image_embeds = image_embeds.reshape(B, -1, C)

        return image_embeds

    @property
    def dummy_feature(self):
        return torch.zeros(1, self.hidden_size, device=self.device, dtype=self.dtype)

    @property
    def dtype(self):
        for p in self.vision_tower.parameters():
            return p.dtype

    @property
    def device(self):
        for p in self.vision_tower.parameters():
            return p.device

    @property
    def hidden_size(self):
        return self.config.hidden_size

    @property
    def num_patches(self):
        return (self.config.image_size // self.config.patch_size) ** 2

    @property
    def num_patches_per_side(self):
        return self.config.image_size // self.config.patch_size

    @property
    def image_size(self):
        return self.config.image_size

def build_vision_tower(vision_tower_cfg, **kwargs):
    vision_tower = getattr(vision_tower_cfg, "mm_vision_tower", getattr(vision_tower_cfg, "vision_tower", None))

    if "umt-hd" in vision_tower:
        return UMTVisionTower(vision_tower, vision_tower_cfg=vision_tower_cfg, image_size=448, **kwargs)
    elif "umt" in vision_tower:
        raise NotImplementedError
        return UMTVisionTower(vision_tower, vision_tower_cfg=vision_tower_cfg, **kwargs)

    raise ValueError(f"Unknown vision tower: {vision_tower}")
vocab.json
ADDED
The diff for this file is too large to render.
See raw diff