Navyabhat commited on
Commit
f776adf
1 Parent(s): d43c6a1

Upload 76 files

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. inference/__init__.py +0 -0
  2. inference/conversation.py +224 -0
  3. inference/inference.ipynb +369 -0
  4. inference/main.py +226 -0
  5. inference/model/__init__.py +2 -0
  6. inference/model/builder.py +180 -0
  7. inference/model/language_model/configuration_llava_phi.py +191 -0
  8. inference/model/language_model/llava_phi.py +126 -0
  9. inference/model/llava_arch.py +330 -0
  10. inference/model/multimodal_encoder/clip_encoder.py +89 -0
  11. inference/model/multimodal_projector/builder.py +50 -0
  12. llava-phi/llava_phi/__init__.py +1 -0
  13. llava-phi/llava_phi/constants.py +12 -0
  14. llava-phi/llava_phi/conversation.py +224 -0
  15. llava-phi/llava_phi/eval/eval_gpt_review.py +113 -0
  16. llava-phi/llava_phi/eval/eval_gpt_review_bench.py +121 -0
  17. llava-phi/llava_phi/eval/eval_gpt_review_visual.py +118 -0
  18. llava-phi/llava_phi/eval/eval_pope.py +81 -0
  19. llava-phi/llava_phi/eval/eval_science_qa.py +114 -0
  20. llava-phi/llava_phi/eval/eval_science_qa_gpt4.py +104 -0
  21. llava-phi/llava_phi/eval/eval_science_qa_gpt4_requery.py +149 -0
  22. llava-phi/llava_phi/eval/eval_textvqa.py +65 -0
  23. llava-phi/llava_phi/eval/m4c_evaluator.py +334 -0
  24. llava-phi/llava_phi/eval/model_qa.py +88 -0
  25. llava-phi/llava_phi/eval/model_vqa.py +115 -0
  26. llava-phi/llava_phi/eval/model_vqa_loader.py +144 -0
  27. llava-phi/llava_phi/eval/model_vqa_mmbench.py +173 -0
  28. llava-phi/llava_phi/eval/model_vqa_phi.py +117 -0
  29. llava-phi/llava_phi/eval/model_vqa_science.py +152 -0
  30. llava-phi/llava_phi/eval/qa_baseline_gpt35.py +74 -0
  31. llava-phi/llava_phi/eval/run_llava_phi.py +93 -0
  32. llava-phi/llava_phi/eval/summarize_gpt_review.py +60 -0
  33. llava-phi/llava_phi/eval/table/rule.json +11 -0
  34. llava-phi/llava_phi/mm_utils.py +96 -0
  35. llava-phi/llava_phi/model/__init__.py +2 -0
  36. llava-phi/llava_phi/model/builder.py +121 -0
  37. llava-phi/llava_phi/model/language_model/configuration_llava_phi.py +179 -0
  38. llava-phi/llava_phi/model/language_model/llava_phi.py +126 -0
  39. llava-phi/llava_phi/model/llava_arch.py +208 -0
  40. llava-phi/llava_phi/model/multimodal_encoder/clip_encoder.py +89 -0
  41. llava-phi/llava_phi/model/multimodal_projector/builder.py +50 -0
  42. llava-phi/llava_phi/serve/__init__.py +0 -0
  43. llava-phi/llava_phi/serve/__pycache__/__init__.cpython-310.pyc +0 -0
  44. llava-phi/llava_phi/serve/__pycache__/cli.cpython-310.pyc +0 -0
  45. llava-phi/llava_phi/serve/app.py +354 -0
  46. llava-phi/llava_phi/serve/cli.py +121 -0
  47. llava-phi/llava_phi/serve/examples/extreme_ironing.jpg +0 -0
  48. llava-phi/llava_phi/serve/examples/waterview.jpg +0 -0
  49. llava-phi/llava_phi/train/convert_model2base_llava_phi.py +767 -0
  50. llava-phi/llava_phi/train/llava_phi_trainer.py +156 -0
inference/__init__.py ADDED
File without changes
inference/conversation.py ADDED
@@ -0,0 +1,224 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import dataclasses
2
+ from enum import auto, Enum
3
+ from typing import List, Tuple
4
+
5
+
6
+ class SeparatorStyle(Enum):
7
+ """Different separator style."""
8
+ SINGLE = auto()
9
+ TWO = auto()
10
+ MPT = auto()
11
+ PLAIN = auto()
12
+ LLAMA_2 = auto()
13
+
14
+
15
+ @dataclasses.dataclass
16
+ class Conversation:
17
+ """A class that keeps all conversation history."""
18
+ system: str
19
+ roles: List[str]
20
+ messages: List[List[str]]
21
+ offset: int
22
+ sep_style: SeparatorStyle = SeparatorStyle.SINGLE
23
+ sep: str = "###"
24
+ sep2: str = None
25
+ version: str = "Unknown"
26
+
27
+ skip_next: bool = False
28
+
29
+ def get_prompt(self):
30
+ messages = self.messages
31
+ if len(messages) > 0 and type(messages[0][1]) is tuple:
32
+ messages = self.messages.copy()
33
+ init_role, init_msg = messages[0].copy()
34
+ init_msg = init_msg[0].replace("<image>", "").strip()
35
+ if 'mmtag' in self.version:
36
+ messages[0] = (init_role, init_msg)
37
+ messages.insert(0, (self.roles[0], "<Image><image></Image>"))
38
+ messages.insert(1, (self.roles[1], "Received."))
39
+ else:
40
+ messages[0] = (init_role, "<image>\n" + init_msg)
41
+
42
+ if self.sep_style == SeparatorStyle.SINGLE:
43
+ ret = self.system + self.sep
44
+ for role, message in messages:
45
+ if message:
46
+ if type(message) is tuple:
47
+ message, _, _ = message
48
+ ret += role + ": " + message + self.sep
49
+ else:
50
+ ret += role + ":"
51
+ elif self.sep_style == SeparatorStyle.TWO:
52
+ seps = [self.sep, self.sep2]
53
+ ret = self.system + seps[0]
54
+ for i, (role, message) in enumerate(messages):
55
+ if message:
56
+ if type(message) is tuple:
57
+ message, _, _ = message
58
+ ret += role + ": " + message + seps[i % 2]
59
+ else:
60
+ ret += role + ":"
61
+ elif self.sep_style == SeparatorStyle.PLAIN:
62
+ seps = [self.sep, self.sep2]
63
+ ret = self.system
64
+ for i, (role, message) in enumerate(messages):
65
+ if message:
66
+ if type(message) is tuple:
67
+ message, _, _ = message
68
+ ret += message + seps[i % 2]
69
+ else:
70
+ ret += ""
71
+ else:
72
+ raise ValueError(f"Invalid style: {self.sep_style}")
73
+
74
+ return ret
75
+
76
+ def append_message(self, role, message):
77
+ self.messages.append([role, message])
78
+
79
+ def get_images(self, return_pil=False):
80
+ images = []
81
+ for i, (role, msg) in enumerate(self.messages[self.offset:]):
82
+ if i % 2 == 0:
83
+ if type(msg) is tuple:
84
+ import base64
85
+ from io import BytesIO
86
+ from PIL import Image
87
+ msg, image, image_process_mode = msg
88
+ if image_process_mode == "Pad":
89
+ def expand2square(pil_img, background_color=(122, 116, 104)):
90
+ width, height = pil_img.size
91
+ if width == height:
92
+ return pil_img
93
+ elif width > height:
94
+ result = Image.new(pil_img.mode, (width, width), background_color)
95
+ result.paste(pil_img, (0, (width - height) // 2))
96
+ return result
97
+ else:
98
+ result = Image.new(pil_img.mode, (height, height), background_color)
99
+ result.paste(pil_img, ((height - width) // 2, 0))
100
+ return result
101
+ image = expand2square(image)
102
+ elif image_process_mode in ["Default", "Crop"]:
103
+ pass
104
+ elif image_process_mode == "Resize":
105
+ image = image.resize((336, 336))
106
+ else:
107
+ raise ValueError(f"Invalid image_process_mode: {image_process_mode}")
108
+ max_hw, min_hw = max(image.size), min(image.size)
109
+ aspect_ratio = max_hw / min_hw
110
+ max_len, min_len = 800, 400
111
+ shortest_edge = int(min(max_len / aspect_ratio, min_len, min_hw))
112
+ longest_edge = int(shortest_edge * aspect_ratio)
113
+ W, H = image.size
114
+ if longest_edge != max(image.size):
115
+ if H > W:
116
+ H, W = longest_edge, shortest_edge
117
+ else:
118
+ H, W = shortest_edge, longest_edge
119
+ image = image.resize((W, H))
120
+ if return_pil:
121
+ images.append(image)
122
+ else:
123
+ buffered = BytesIO()
124
+ image.save(buffered, format="PNG")
125
+ img_b64_str = base64.b64encode(buffered.getvalue()).decode()
126
+ images.append(img_b64_str)
127
+ return images
128
+
129
+ def to_gradio_chatbot(self):
130
+ ret = []
131
+ for i, (role, msg) in enumerate(self.messages[self.offset:]):
132
+ if i % 2 == 0:
133
+ if type(msg) is tuple:
134
+ import base64
135
+ from io import BytesIO
136
+ msg, image, image_process_mode = msg
137
+ max_hw, min_hw = max(image.size), min(image.size)
138
+ aspect_ratio = max_hw / min_hw
139
+ max_len, min_len = 800, 400
140
+ shortest_edge = int(min(max_len / aspect_ratio, min_len, min_hw))
141
+ longest_edge = int(shortest_edge * aspect_ratio)
142
+ W, H = image.size
143
+ if H > W:
144
+ H, W = longest_edge, shortest_edge
145
+ else:
146
+ H, W = shortest_edge, longest_edge
147
+ image = image.resize((W, H))
148
+ buffered = BytesIO()
149
+ image.save(buffered, format="JPEG")
150
+ img_b64_str = base64.b64encode(buffered.getvalue()).decode()
151
+ img_str = f'<img src="data:image/png;base64,{img_b64_str}" alt="user upload image" />'
152
+ msg = img_str + msg.replace('<image>', '').strip()
153
+ ret.append([msg, None])
154
+ else:
155
+ ret.append([msg, None])
156
+ else:
157
+ ret[-1][-1] = msg
158
+ return ret
159
+
160
+ def copy(self):
161
+ return Conversation(
162
+ system=self.system,
163
+ roles=self.roles,
164
+ messages=[[x, y] for x, y in self.messages],
165
+ offset=self.offset,
166
+ sep_style=self.sep_style,
167
+ sep=self.sep,
168
+ sep2=self.sep2,
169
+ version=self.version)
170
+
171
+ def dict(self):
172
+ if len(self.get_images()) > 0:
173
+ return {
174
+ "system": self.system,
175
+ "roles": self.roles,
176
+ "messages": [[x, y[0] if type(y) is tuple else y] for x, y in self.messages],
177
+ "offset": self.offset,
178
+ "sep": self.sep,
179
+ "sep2": self.sep2,
180
+ }
181
+ return {
182
+ "system": self.system,
183
+ "roles": self.roles,
184
+ "messages": self.messages,
185
+ "offset": self.offset,
186
+ "sep": self.sep,
187
+ "sep2": self.sep2,
188
+ }
189
+
190
+
191
# Default chat template for the phi-2 based LLaVA model ("v0"): a
# USER/ASSISTANT dialogue using the TWO separator style, where user turns end
# with a space and assistant turns end with the phi-2 end-of-text token.
conv_phi_v0 = Conversation(
    system="A chat between a curious user and an artificial intelligence assistant. "
    "The assistant gives helpful, detailed, and polite answers to the user's questions.",
    roles=("USER", "ASSISTANT"),
    version="v0",
    messages=(),
    offset=0,
    sep_style=SeparatorStyle.TWO,
    sep=" ",
    sep2="<|endoftext|>",
)

# Bare template with no system prompt and no role labels; messages are joined
# with newlines (PLAIN style). Note sep2 is left as None, so only even-indexed
# messages get a separator appended.
conv_llava_plain = Conversation(
    system="",
    roles=("", ""),
    messages=(
    ),
    offset=0,
    sep_style=SeparatorStyle.PLAIN,
    sep="\n",
)

# Registry used by callers to look templates up by name; several aliases map
# to the same phi-2 v0 template.
default_conversation = conv_phi_v0
conv_templates = {
    "default": conv_phi_v0,
    "v0": conv_phi_v0,
    "phi-2_v0": conv_phi_v0,

    "plain": conv_llava_plain,
}


if __name__ == "__main__":
    # Smoke test: print the empty default prompt (system preamble + separator).
    print(default_conversation.get_prompt())
inference/inference.ipynb ADDED
@@ -0,0 +1,369 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "id": "cdad6b21-030a-40d3-9b31-a229e5b6196d",
7
+ "metadata": {},
8
+ "outputs": [],
9
+ "source": [
10
+ "import torch\n",
11
+ "from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, AutoTokenizer, AutoConfig, CLIPImageProcessor"
12
+ ]
13
+ },
14
+ {
15
+ "cell_type": "code",
16
+ "execution_count": 2,
17
+ "id": "1f832710-0e8c-42ec-b581-1b15fd2a6acc",
18
+ "metadata": {},
19
+ "outputs": [
20
+ {
21
+ "name": "stdout",
22
+ "output_type": "stream",
23
+ "text": [
24
+ "[2024-01-25 14:31:58,511] [INFO] [real_accelerator.py:110:get_accelerator] Setting ds_accelerator to cuda (auto detect)\n"
25
+ ]
26
+ }
27
+ ],
28
+ "source": [
29
+ "from model import LlavaPhiForCausalLM"
30
+ ]
31
+ },
32
+ {
33
+ "cell_type": "code",
34
+ "execution_count": 3,
35
+ "id": "9e68f1d4-1ae3-4d45-b818-4600218d2215",
36
+ "metadata": {},
37
+ "outputs": [
38
+ {
39
+ "data": {
40
+ "application/vnd.jupyter.widget-view+json": {
41
+ "model_id": "e5e13e666e3a43d4ad26cc70904abee8",
42
+ "version_major": 2,
43
+ "version_minor": 0
44
+ },
45
+ "text/plain": [
46
+ "Loading checkpoint shards: 0%| | 0/3 [00:00<?, ?it/s]"
47
+ ]
48
+ },
49
+ "metadata": {},
50
+ "output_type": "display_data"
51
+ }
52
+ ],
53
+ "source": [
54
+ "model_name = \"RaviNaik/Llava-Phi2\"\n",
55
+ "model = LlavaPhiForCausalLM.from_pretrained(model_name)"
56
+ ]
57
+ },
58
+ {
59
+ "cell_type": "code",
60
+ "execution_count": 4,
61
+ "id": "49edfa0d-e08a-4d3c-a1d6-34068b122419",
62
+ "metadata": {},
63
+ "outputs": [
64
+ {
65
+ "name": "stderr",
66
+ "output_type": "stream",
67
+ "text": [
68
+ "Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.\n"
69
+ ]
70
+ }
71
+ ],
72
+ "source": [
73
+ "tokenizer = AutoTokenizer.from_pretrained(model_name)"
74
+ ]
75
+ },
76
+ {
77
+ "cell_type": "code",
78
+ "execution_count": 5,
79
+ "id": "dcec20cd-d946-42d7-8e10-c198cd49b486",
80
+ "metadata": {},
81
+ "outputs": [],
82
+ "source": [
83
+ "image_processor = CLIPImageProcessor.from_pretrained(model_name)\n",
84
+ "mm_use_im_start_end = getattr(model.config, \"mm_use_im_start_end\", False)\n",
85
+ "mm_use_im_patch_token = getattr(model.config, \"mm_use_im_patch_token\", True)"
86
+ ]
87
+ },
88
+ {
89
+ "cell_type": "code",
90
+ "execution_count": 6,
91
+ "id": "443c13c4-b7e6-4bc5-b6c7-c577bd4708f6",
92
+ "metadata": {},
93
+ "outputs": [],
94
+ "source": [
95
+ "if mm_use_im_patch_token:\n",
96
+ " tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)\n",
97
+ "if mm_use_im_start_end:\n",
98
+ " tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)\n",
99
+ " \n",
100
+ "if hasattr(model.config, \"max_sequence_length\"):\n",
101
+ " context_len = model.config.max_sequence_length\n",
102
+ "else:\n",
103
+ " context_len = 2048"
104
+ ]
105
+ },
106
+ {
107
+ "cell_type": "code",
108
+ "execution_count": 7,
109
+ "id": "d8caee43-0d2a-46d4-bdbc-2cfc7dec9e52",
110
+ "metadata": {},
111
+ "outputs": [],
112
+ "source": [
113
+ "from transformers import WhisperProcessor, WhisperForConditionalGeneration"
114
+ ]
115
+ },
116
+ {
117
+ "cell_type": "code",
118
+ "execution_count": 8,
119
+ "id": "3acea526-d8ae-4eb6-8dfc-4ea72651b547",
120
+ "metadata": {},
121
+ "outputs": [],
122
+ "source": [
123
+ "class AudioLanguageConnector:\n",
124
+ " def __init__(self, projection_dim):\n",
125
+ " model_name = \"microsoft/phi-2\"\n",
126
+ " self.phi2_tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)\n",
127
+ " self.phi2_tokenizer.pad_token = self.phi2_tokenizer.eos_token\n",
128
+ " self.phi2_tokenizer.max_length = projection_dim\n",
129
+ "\n",
130
+ " def __call__(self, text):\n",
131
+ " text = f\"<audio_start> {text} <audio_end>\"\n",
132
+ " tokens = self.phi2_tokenizer(text, return_tensors=\"pt\", return_attention_mask=False)\n",
133
+ " return tokens\n",
134
+ " \n",
135
+ "\n",
136
+ "class WhisperWithProjection:\n",
137
+ " def __init__(self, projection_dim, device):\n",
138
+ " self.device = device\n",
139
+ " self.processor = WhisperProcessor.from_pretrained(\"openai/whisper-tiny\", device_map=device)\n",
140
+ " self.model = WhisperForConditionalGeneration.from_pretrained(\"openai/whisper-tiny\", device_map=device)\n",
141
+ " self.model.config.forced_decoder_ids = None\n",
142
+ " # self.audio_language_connector = AudioLanguageConnector(projection_dim)\n",
143
+ " \n",
144
+ " def __call__(self, audio):\n",
145
+ " input_features = self.processor(audio[\"array\"],\n",
146
+ " sampling_rate=audio[\"sampling_rate\"],\n",
147
+ " return_tensors=\"pt\").input_features\n",
148
+ " # generate token ids\n",
149
+ " predicted_ids = self.model.generate(input_features.to(self.device))\n",
150
+ " # decode token ids to text \n",
151
+ " transcription = self.processor.batch_decode(predicted_ids, skip_special_tokens=True)\n",
152
+ "\n",
153
+ " # audio_embeddings = self.audio_language_connector(transcription)\n",
154
+ " return transcription"
155
+ ]
156
+ },
157
+ {
158
+ "cell_type": "code",
159
+ "execution_count": 10,
160
+ "id": "a2757c91-2ec1-4fe7-9216-03740bf80061",
161
+ "metadata": {},
162
+ "outputs": [],
163
+ "source": [
164
+ "IGNORE_INDEX = -100\n",
165
+ "IMAGE_TOKEN_INDEX = -200\n",
166
+ "DEFAULT_IMAGE_TOKEN = \"<image>\"\n",
167
+ "DEFAULT_IMAGE_PATCH_TOKEN = \"<im_patch>\"\n",
168
+ "DEFAULT_IM_START_TOKEN = \"<im_start>\"\n",
169
+ "DEFAULT_IM_END_TOKEN = \"<im_end>\"\n",
170
+ "\n",
171
+ "from conversation import conv_templates, SeparatorStyle\n",
172
+ "\n",
173
+ "class MultiModalPhi2:\n",
174
+ " def __init__(self, modelname_or_path=\"RaviNaik/Llava-Phi2\",\n",
175
+ " temperature=0.2,\n",
176
+ " max_new_tokens=1024,\n",
177
+ " device=\"cuda:0\"):\n",
178
+ " self.model_name = modelname_or_path\n",
179
+ " self.temperature = temperature\n",
180
+ " self.max_new_tokens = max_new_tokens\n",
181
+ " self.device = device\n",
182
+ " self.disable_torch_init()\n",
183
+ " self.whisper_w_proj = WhisperWithProjection(projection_dim=512, device=device)\n",
184
+ " self.load_pretrained_model()\n",
185
+ " \n",
186
+ " def disable_torch_init(self):\n",
187
+ " \"\"\"\n",
188
+ " Disable the redundant torch default initialization to accelerate model creation.\n",
189
+ " \"\"\"\n",
190
+ " setattr(torch.nn.Linear, \"reset_parameters\", lambda self: None)\n",
191
+ " setattr(torch.nn.LayerNorm, \"reset_parameters\", lambda self: None)\n",
192
+ " \n",
193
+ " def load_pretrained_model(self):\n",
194
+ " self.model = LlavaPhiForCausalLM.from_pretrained(self.model_name, device_map=self.device)\n",
195
+ " self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)\n",
196
+ " self.image_processor = CLIPImageProcessor.from_pretrained(self.model_name)\n",
197
+ " mm_use_im_start_end = getattr(self.model.config, \"mm_use_im_start_end\", False)\n",
198
+ " mm_use_im_patch_token = getattr(self.model.config, \"mm_use_im_patch_token\", True)\n",
199
+ " if mm_use_im_patch_token:\n",
200
+ " self.tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)\n",
201
+ " if mm_use_im_start_end:\n",
202
+ " self.tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)\n",
203
+ " \n",
204
+ " def tokenizer_image_token(self, prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None):\n",
205
+ " prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split('<image>')]\n",
206
+ " \n",
207
+ " def insert_separator(X, sep):\n",
208
+ " return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1]\n",
209
+ " \n",
210
+ " input_ids = []\n",
211
+ " offset = 0\n",
212
+ " if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id:\n",
213
+ " offset = 1\n",
214
+ " input_ids.append(prompt_chunks[0][0])\n",
215
+ " for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):\n",
216
+ " input_ids.extend(x[offset:])\n",
217
+ " \n",
218
+ " if return_tensors is not None:\n",
219
+ " if return_tensors == 'pt':\n",
220
+ " return torch.tensor(input_ids, dtype=torch.long)\n",
221
+ " raise ValueError(f'Unsupported tensor type: {return_tensors}')\n",
222
+ " return input_ids\n",
223
+ " \n",
224
+ " def __call__(self, text, audio, image):\n",
225
+ " qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\\n' + text\n",
226
+ " conv = conv_templates[\"phi-2_v0\"].copy()\n",
227
+ " conv.append_message(conv.roles[0], qs)\n",
228
+ " conv.append_message(conv.roles[1], None)\n",
229
+ " prompt = conv.get_prompt()\n",
230
+ "\n",
231
+ " image_tensor = self.image_processor.preprocess(image, return_tensors='pt')['pixel_values'].to(self.device)\n",
232
+ " \n",
233
+ " input_ids = self.tokenizer_image_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0)\n",
234
+ " if audio is not None:\n",
235
+ " audio_transcript = self.whisper_w_proj(audio)\n",
236
+ " audio_embed = self.tokenizer(audio_transcript, return_tensors='pt')[\"input_ids\"]\n",
237
+ " input_ids = torch.concat([input_ids, audio_embed], dim=1)\n",
238
+ " input_ids = input_ids.to(self.device)\n",
239
+ " \n",
240
+ " stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2\n",
241
+ "\n",
242
+ " with torch.inference_mode():\n",
243
+ " output_ids = self.model.generate(\n",
244
+ " input_ids,\n",
245
+ " images=image_tensor,\n",
246
+ " do_sample=True,\n",
247
+ " temperature=self.temperature,\n",
248
+ " max_new_tokens=self.max_new_tokens,\n",
249
+ " eos_token_id=self.tokenizer.eos_token_id, # End of sequence token\n",
250
+ " pad_token_id=self.tokenizer.eos_token_id, # Pad token\n",
251
+ " use_cache=True,\n",
252
+ " )\n",
253
+ "\n",
254
+ " input_token_len = input_ids.shape[1]\n",
255
+ " n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item()\n",
256
+ " if n_diff_input_output > 0:\n",
257
+ " print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids')\n",
258
+ " outputs = self.tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0]\n",
259
+ " outputs = outputs.strip()\n",
260
+ " if outputs.endswith(stop_str):\n",
261
+ " outputs = outputs[:-len(stop_str)]\n",
262
+ " outputs = outputs.strip()\n",
263
+ " return outputs"
264
+ ]
265
+ },
266
+ {
267
+ "cell_type": "code",
268
+ "execution_count": 11,
269
+ "id": "cc47e6a0-3544-4a60-930f-ccae87ef945a",
270
+ "metadata": {},
271
+ "outputs": [
272
+ {
273
+ "data": {
274
+ "application/vnd.jupyter.widget-view+json": {
275
+ "model_id": "9ef56077307d4cef907e25b092061611",
276
+ "version_major": 2,
277
+ "version_minor": 0
278
+ },
279
+ "text/plain": [
280
+ "Loading checkpoint shards: 0%| | 0/3 [00:00<?, ?it/s]"
281
+ ]
282
+ },
283
+ "metadata": {},
284
+ "output_type": "display_data"
285
+ },
286
+ {
287
+ "name": "stderr",
288
+ "output_type": "stream",
289
+ "text": [
290
+ "Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.\n"
291
+ ]
292
+ }
293
+ ],
294
+ "source": [
295
+ "multimodal_phi2 = MultiModalPhi2()"
296
+ ]
297
+ },
298
+ {
299
+ "cell_type": "code",
300
+ "execution_count": 12,
301
+ "id": "cb8aca1b-7d75-45e7-b5a4-71d151f792e1",
302
+ "metadata": {},
303
+ "outputs": [],
304
+ "source": [
305
+ "from PIL import Image\n",
306
+ "import requests\n",
307
+ "\n",
308
+ "url = \"https://www.ilankelman.org/stopsigns/australia.jpg\"\n",
309
+ "image = Image.open(requests.get(url, stream=True).raw)\n",
310
+ "\n",
311
+ "from datasets import load_dataset\n",
312
+ "audio_ds = load_dataset(\"hf-internal-testing/librispeech_asr_dummy\", \"clean\", split=\"validation\")\n",
313
+ "audio = audio_ds[0][\"audio\"]\n",
314
+ "\n",
315
+ "text = \"tell me about the image\""
316
+ ]
317
+ },
318
+ {
319
+ "cell_type": "code",
320
+ "execution_count": 14,
321
+ "id": "6767efc6-be4f-44d3-84ff-34db57d9f940",
322
+ "metadata": {},
323
+ "outputs": [
324
+ {
325
+ "data": {
326
+ "text/plain": [
327
+ "'In the image, there is a Chinese writing on a pole in a foreign language. This suggests that the image was taken in a foreign country, possibly in a foreign country. The sign is in a foreign language, which might be in a foreign language. The sign is written in Japanese, which is a common language in Japan. The sign is also written in two different languages, which suggests that it is written in a language that is not in the native language.'"
328
+ ]
329
+ },
330
+ "execution_count": 14,
331
+ "metadata": {},
332
+ "output_type": "execute_result"
333
+ }
334
+ ],
335
+ "source": [
336
+ "multimodal_phi2(text, None, image)"
337
+ ]
338
+ },
339
+ {
340
+ "cell_type": "code",
341
+ "execution_count": null,
342
+ "id": "0bdd0b8a-709b-4c82-ac1d-dc746d3a0748",
343
+ "metadata": {},
344
+ "outputs": [],
345
+ "source": []
346
+ }
347
+ ],
348
+ "metadata": {
349
+ "kernelspec": {
350
+ "display_name": "Python 3 (ipykernel)",
351
+ "language": "python",
352
+ "name": "python3"
353
+ },
354
+ "language_info": {
355
+ "codemirror_mode": {
356
+ "name": "ipython",
357
+ "version": 3
358
+ },
359
+ "file_extension": ".py",
360
+ "mimetype": "text/x-python",
361
+ "name": "python",
362
+ "nbconvert_exporter": "python",
363
+ "pygments_lexer": "ipython3",
364
+ "version": "3.10.12"
365
+ }
366
+ },
367
+ "nbformat": 4,
368
+ "nbformat_minor": 5
369
+ }
inference/main.py ADDED
@@ -0,0 +1,226 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import soundfile as sf
2
+ import librosa
3
+ import torch
4
+ from transformers import (
5
+ AutoTokenizer,
6
+ CLIPImageProcessor,
7
+ WhisperProcessor,
8
+ WhisperForConditionalGeneration,
9
+ )
10
+
11
+ from .model import LlavaPhiForCausalLM
12
+ from .conversation import conv_templates, SeparatorStyle
13
+
14
# Special token ids / placeholder strings shared with the LLaVA-Phi model code.
IGNORE_INDEX = -100  # -100 matches torch CrossEntropyLoss's default ignore_index — presumably labels to skip; unused in this module
IMAGE_TOKEN_INDEX = -200  # sentinel id spliced into input_ids where image features go (see tokenizer_image_token)
DEFAULT_IMAGE_TOKEN = "<image>"  # textual placeholder for an image inside a prompt
DEFAULT_IMAGE_PATCH_TOKEN = "<im_patch>"  # added to the tokenizer when the model config sets mm_use_im_patch_token
DEFAULT_IM_START_TOKEN = "<im_start>"  # added (with the end token) when mm_use_im_start_end is set
DEFAULT_IM_END_TOKEN = "<im_end>"
20
+
21
+
22
class AudioLanguageConnector:
    """Tokenizes an audio transcript with the phi-2 tokenizer, wrapped in
    audio marker strings. (Currently unused — see the commented-out call
    sites in WhisperWithProjection.)"""

    def __init__(self, projection_dim):
        tokenizer = AutoTokenizer.from_pretrained(
            "microsoft/phi-2", trust_remote_code=True
        )
        tokenizer.pad_token = tokenizer.eos_token
        tokenizer.max_length = projection_dim
        self.phi2_tokenizer = tokenizer

    def __call__(self, text):
        wrapped = f"<audio_start> {text} <audio_end>"
        return self.phi2_tokenizer(
            wrapped, return_tensors="pt", return_attention_mask=False
        )
37
+
38
+
39
class WhisperWithProjection:
    """Speech-to-text front end built on Whisper-tiny.

    Reads an audio file, resamples it to 16 kHz, and returns the text
    transcription. The projected audio-embedding path
    (AudioLanguageConnector) is currently disabled.
    """

    def __init__(self, projection_dim, device):
        self.device = device
        self.processor = WhisperProcessor.from_pretrained(
            "openai/whisper-tiny", device_map=device
        )
        self.model = WhisperForConditionalGeneration.from_pretrained(
            "openai/whisper-tiny", device_map=device
        )
        # Let generate() choose decoder prompts instead of forcing ids.
        self.model.config.forced_decoder_ids = None
        # self.audio_language_connector = AudioLanguageConnector(projection_dim)

    def __call__(self, audio):
        waveform, source_rate = sf.read(audio)
        # Whisper expects 16 kHz mono input.
        waveform_16k = librosa.resample(
            waveform, orig_sr=source_rate, target_sr=16000
        )
        features = self.processor(
            waveform_16k, sampling_rate=16000, return_tensors="pt"
        ).input_features
        # generate token ids, then decode them to text
        predicted_ids = self.model.generate(features.to(self.device))
        # audio_embeddings = self.audio_language_connector(transcription)
        return self.processor.batch_decode(predicted_ids, skip_special_tokens=True)
70
+
71
+
72
class MultiModalPhi2:
    """End-to-end inference wrapper around LLaVA-Phi2.

    Accepts text plus optional audio (transcribed via Whisper and appended to
    the prompt tokens) and an optional image (encoded by the model's CLIP
    processor), and returns the generated answer string.
    """

    def __init__(
        self,
        modelname_or_path="RaviNaik/Llava-Phi2",
        temperature=0.2,
        max_new_tokens=1024,
        device="cuda:0",
    ):
        self.model_name = modelname_or_path
        self.temperature = temperature
        self.max_new_tokens = max_new_tokens
        self.device = device
        self.disable_torch_init()
        self.whisper_w_proj = WhisperWithProjection(projection_dim=512, device=device)
        self.load_pretrained_model()

    def disable_torch_init(self):
        """
        Disable the redundant torch default initialization to accelerate model creation.
        """
        # NOTE(review): this patches torch globally, affecting every Linear /
        # LayerNorm created afterwards in the process, not just this model.
        setattr(torch.nn.Linear, "reset_parameters", lambda self: None)
        setattr(torch.nn.LayerNorm, "reset_parameters", lambda self: None)

    def load_pretrained_model(self):
        """Load model, tokenizer and image processor; register the extra
        image placeholder tokens the checkpoint's config asks for."""
        self.model = LlavaPhiForCausalLM.from_pretrained(
            self.model_name, device_map=self.device
        )
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
        self.image_processor = CLIPImageProcessor.from_pretrained(self.model_name)
        mm_use_im_start_end = getattr(self.model.config, "mm_use_im_start_end", False)
        mm_use_im_patch_token = getattr(
            self.model.config, "mm_use_im_patch_token", True
        )
        if mm_use_im_patch_token:
            self.tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
        if mm_use_im_start_end:
            self.tokenizer.add_tokens(
                [DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True
            )

    def tokenizer_image_token(
        self,
        prompt,
        tokenizer,
        image_token_index=IMAGE_TOKEN_INDEX,
        return_tensors=None,
    ):
        """Tokenize `prompt`, replacing each "<image>" occurrence with the
        sentinel `image_token_index` id.

        Returns a list of ids, or a 1-D long tensor when return_tensors="pt".
        Raises ValueError for any other return_tensors value.
        """
        # Tokenize the text between "<image>" placeholders separately.
        prompt_chunks = [
            tokenizer(chunk).input_ids for chunk in prompt.split("<image>")
        ]

        def insert_separator(X, sep):
            # Interleave sep between the elements of X: [x0, sep, x1, sep, ...]
            return [ele for sublist in zip(X, [sep] * len(X)) for ele in sublist][:-1]

        input_ids = []
        offset = 0
        # If the tokenizer emitted a BOS token, keep a single copy up front and
        # skip the duplicate BOS at the start of every chunk below.
        if (
            len(prompt_chunks) > 0
            and len(prompt_chunks[0]) > 0
            and prompt_chunks[0][0] == tokenizer.bos_token_id
        ):
            offset = 1
            input_ids.append(prompt_chunks[0][0])
        for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):
            input_ids.extend(x[offset:])

        if return_tensors is not None:
            if return_tensors == "pt":
                return torch.tensor(input_ids, dtype=torch.long)
            raise ValueError(f"Unsupported tensor type: {return_tensors}")
        return input_ids

    def __call__(self, text, audio, image):
        """Generate an answer for (text, audio, image); audio and image may be None."""
        if text is None:
            text = ""
        if image is not None:
            # Prepend the image placeholder (with start/end markers) to the text.
            qs = (
                DEFAULT_IM_START_TOKEN
                + DEFAULT_IMAGE_TOKEN
                + DEFAULT_IM_END_TOKEN
                + "\n"
                + text
            )
            conv = conv_templates["phi-2_v0"].copy()
            conv.append_message(conv.roles[0], qs)
            conv.append_message(conv.roles[1], None)  # empty slot the model completes
            prompt = conv.get_prompt()

            input_ids = self.tokenizer_image_token(
                prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt"
            ).unsqueeze(0)

            image_tensor = self.image_processor.preprocess(image, return_tensors="pt")[
                "pixel_values"
            ].to(self.device)
        else:
            # Text-only path: plain tokenization, no image tensor.
            qs = text
            conv = conv_templates["phi-2_v0"].copy()
            conv.append_message(conv.roles[0], qs)
            conv.append_message(conv.roles[1], None)
            prompt = conv.get_prompt()

            input_ids = self.tokenizer(prompt, return_tensors="pt")["input_ids"]

            image_tensor = None

        if audio is not None:
            # Transcribe the audio and append its token ids after the prompt.
            # NOTE(review): whisper_w_proj returns a list of strings from
            # batch_decode; the tokenizer call below tokenizes that batch —
            # presumably a single-element list. Verify against callers.
            audio_transcript = self.whisper_w_proj(audio)
            audio_embed = self.tokenizer(audio_transcript, return_tensors="pt")[
                "input_ids"
            ]
            input_ids = torch.concat([input_ids, audio_embed], dim=1)
        input_ids = input_ids.to(self.device)

        # For the TWO separator style the stop string is sep2 ("<|endoftext|>").
        stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2

        with torch.inference_mode():
            if image is not None:
                output_ids = self.model.generate(
                    input_ids,
                    images=image_tensor,
                    do_sample=True,
                    temperature=self.temperature,
                    max_new_tokens=self.max_new_tokens,
                    eos_token_id=self.tokenizer.eos_token_id,  # End of sequence token
                    pad_token_id=self.tokenizer.eos_token_id,  # Pad token
                    use_cache=True,
                )
            else:
                output_ids = self.model.generate(
                    input_ids,
                    do_sample=True,
                    temperature=self.temperature,
                    max_new_tokens=self.max_new_tokens,
                    eos_token_id=self.tokenizer.eos_token_id,  # End of sequence token
                    pad_token_id=self.tokenizer.eos_token_id,  # Pad token
                    use_cache=True,
                )

        # Sanity check: generate() should echo the prompt ids unchanged.
        input_token_len = input_ids.shape[1]
        n_diff_input_output = (
            (input_ids != output_ids[:, :input_token_len]).sum().item()
        )
        if n_diff_input_output > 0:
            print(
                f"[Warning] {n_diff_input_output} output_ids are not the same as the input_ids"
            )
        # Decode only the newly generated tokens and strip the stop string.
        outputs = self.tokenizer.batch_decode(
            output_ids[:, input_token_len:], skip_special_tokens=True
        )[0]
        outputs = outputs.strip()
        if outputs.endswith(stop_str):
            outputs = outputs[: -len(stop_str)]
        outputs = outputs.strip()
        return outputs
inference/model/__init__.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ from .language_model.llava_phi import LlavaPhiForCausalLM
2
+ from .language_model.configuration_llava_phi import LlavaPhiConfig, LlavaPhiVisionConfig, ProjectorConfig
inference/model/builder.py ADDED
@@ -0,0 +1,180 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import warnings
3
+
4
+ from transformers import (
5
+ AutoTokenizer,
6
+ AutoModelForCausalLM,
7
+ AutoConfig,
8
+ BitsAndBytesConfig,
9
+ CLIPImageProcessor,
10
+ )
11
+ import torch
12
+ from .language_model.llava_phi import LlavaPhiForCausalLM
13
+ from .language_model.configuration_llava_phi import LlavaPhiConfig
14
+
15
+ IGNORE_INDEX = -100
16
+ IMAGE_TOKEN_INDEX = -200
17
+ DEFAULT_IMAGE_TOKEN = "<image>"
18
+ DEFAULT_IMAGE_PATCH_TOKEN = "<im_patch>"
19
+ DEFAULT_IM_START_TOKEN = "<im_start>"
20
+ DEFAULT_IM_END_TOKEN = "<im_end>"
21
+
22
+
23
def load_pretrained_model(
    model_path,
    model_base,
    model_name,
    load_8bit=False,
    load_4bit=False,
    device_map="cuda",
    device="cuda",
):
    """Load a LLaVA-Phi (or plain causal-LM) checkpoint with its companions.

    Dispatches on ``model_name``:
      * contains "phi" + "lora" with ``model_base``  -> base model + merged LoRA adapter
      * contains "phi" with ``model_base``           -> base model + mm-projector weights
      * contains "phi" alone                         -> full LLaVA-Phi checkpoint
      * otherwise                                    -> plain LM (optionally LoRA-merged);
        note the image-token setup below still raises for non-"phi" names.

    Args:
        model_path: Local path or HF Hub repo id of the checkpoint.
        model_base: Base model path/id when ``model_path`` holds only a LoRA
            adapter or a multimodal projector; otherwise ``None``.
        model_name: Name string used for dispatch ("phi" / "lora" substrings).
        load_8bit: Load with bitsandbytes int8 quantization.
        load_4bit: Load with bitsandbytes nf4 4-bit quantization.
        device_map: Forwarded to ``from_pretrained``.
        device: Target device for the final model move (was previously
            ignored in favor of a hard-coded "cuda").

    Returns:
        Tuple ``(tokenizer, model, image_processor, context_len)``.

    Raises:
        ValueError: if ``model_name`` does not contain "phi" when configuring
            the image special tokens.
    """
    kwargs = {"device_map": device_map}
    if load_8bit:
        kwargs["load_in_8bit"] = True
    elif load_4bit:
        kwargs["load_in_4bit"] = True
        kwargs["quantization_config"] = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_compute_dtype=torch.float16,
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type="nf4",
        )
    # else: # TODO: after fine-tuning LLava-Phi, load the model weights with fp16 will pose nan
    #     kwargs['torch_dtype'] = torch.float16

    if "phi" in model_name.lower():
        # ---- LLaVA-Phi multimodal checkpoints -------------------------------
        if "lora" in model_name.lower() and model_base is None:
            warnings.warn(
                "There is `lora` in model name but no `model_base` is provided. If you are loading a LoRA model, please provide the `model_base` argument."
            )
        if "lora" in model_name.lower() and model_base is not None:
            # LoRA adapter: build the base model, load the non-LoRA trainables,
            # then attach and merge the adapter.
            lora_cfg_pretrained = AutoConfig.from_pretrained(model_path)
            tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
            print("Loading LLaVA-Phi from base model...")
            model = LlavaPhiForCausalLM.from_pretrained(
                model_base, low_cpu_mem_usage=True, config=lora_cfg_pretrained, **kwargs
            )
            token_num, token_dim = model.lm_head.out_features, model.lm_head.in_features
            if model.lm_head.weight.shape[0] != token_num:
                # Vocabulary grew during fine-tuning: re-allocate head and
                # embedding tables so the checkpoint weights fit.
                model.lm_head.weight = torch.nn.Parameter(
                    torch.empty(
                        token_num, token_dim, device=model.device, dtype=model.dtype
                    )
                )
                model.model.embed_tokens.weight = torch.nn.Parameter(
                    torch.empty(
                        token_num, token_dim, device=model.device, dtype=model.dtype
                    )
                )

            print("Loading additional LLaVA-Phi weights...")
            if os.path.exists(os.path.join(model_path, "non_lora_trainables.bin")):
                non_lora_trainables = torch.load(
                    os.path.join(model_path, "non_lora_trainables.bin"),
                    map_location="cpu",
                )
            else:
                # this is probably from HF Hub
                from huggingface_hub import hf_hub_download

                def load_from_hf(repo_id, filename, subfolder=None):
                    # Download a single file from the Hub and load it on CPU.
                    cache_file = hf_hub_download(
                        repo_id=repo_id, filename=filename, subfolder=subfolder
                    )
                    return torch.load(cache_file, map_location="cpu")

                non_lora_trainables = load_from_hf(
                    model_path, "non_lora_trainables.bin"
                )
            # Strip training-time key prefixes ("base_model.", then "model.")
            # so the keys line up with this model's state dict.
            non_lora_trainables = {
                (k[11:] if k.startswith("base_model.") else k): v
                for k, v in non_lora_trainables.items()
            }
            if any(k.startswith("model.model.") for k in non_lora_trainables):
                non_lora_trainables = {
                    (k[6:] if k.startswith("model.") else k): v
                    for k, v in non_lora_trainables.items()
                }
            model.load_state_dict(non_lora_trainables, strict=False)

            from peft import PeftModel

            print("Loading LoRA weights...")
            model = PeftModel.from_pretrained(model, model_path)
            print("Merging LoRA weights...")
            model = model.merge_and_unload()
            print("Model is loaded...")
        elif model_base is not None:
            # this may be mm projector only
            print("Loading LLaVA-Phi from base model...")
            tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
            cfg_pretrained = AutoConfig.from_pretrained(model_path)
            model = LlavaPhiForCausalLM.from_pretrained(
                model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs
            )

            mm_projector_weights = torch.load(
                os.path.join(model_path, "mm_projector.bin"), map_location="cpu"
            )
            mm_projector_weights = {
                k: v.to(torch.float16) for k, v in mm_projector_weights.items()
            }
            model.load_state_dict(mm_projector_weights, strict=False)
        else:
            print("load llaVA-Phi MLLM!!!")
            config = LlavaPhiConfig.from_pretrained(model_path, trust_remote_code=True)
            tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)
            # BUGFIX: honor the `device` argument instead of hard-coding "cuda".
            model = LlavaPhiForCausalLM.from_pretrained(
                model_path, config=config, use_safetensors=True, **kwargs
            ).to(device)
    else:
        # ---- Plain language model (no vision tower) -------------------------
        if model_base is not None:
            # PEFT model
            from peft import PeftModel

            tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
            model = AutoModelForCausalLM.from_pretrained(
                model_base,
                torch_dtype=torch.float16,
                low_cpu_mem_usage=True,
                device_map="auto",
            )
            print(f"Loading LoRA weights from {model_path}")
            model = PeftModel.from_pretrained(model, model_path)
            print("Merging weights")
            model = model.merge_and_unload()
            print("Convert to FP16...")
            model.to(torch.float16)
        else:
            tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
            model = AutoModelForCausalLM.from_pretrained(
                model_path, low_cpu_mem_usage=True, **kwargs
            )

    image_processor = CLIPImageProcessor.from_pretrained(model_path)

    if "phi" in model_name.lower():
        mm_use_im_start_end = getattr(model.config, "mm_use_im_start_end", False)
        mm_use_im_patch_token = getattr(model.config, "mm_use_im_patch_token", True)

        # TODO: the tokenizer length of phi-2 is 50295, but the output class of lm_head is 51200
        if mm_use_im_patch_token:
            tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
        if mm_use_im_start_end:
            tokenizer.add_tokens(
                [DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True
            )
        # model.resize_token_embeddings(len(tokenizer))
    else:
        raise ValueError(f"Unsupported model name: {model_name}")

    if hasattr(model.config, "max_sequence_length"):
        context_len = model.config.max_sequence_length
    else:
        context_len = 2048
    # BUGFIX: move to the caller-requested device rather than always "cuda".
    model.to(device=device)
    print(kwargs)
    return tokenizer, model, image_processor, context_len
inference/model/language_model/configuration_llava_phi.py ADDED
@@ -0,0 +1,191 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from typing import Union
3
+ from transformers import PretrainedConfig, PhiConfig
4
+ from transformers.utils import logging
5
+
6
+ logger = logging.get_logger(__name__)
7
+
8
+
9
class LlavaPhiVisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`CLIPVisionModel`]. It is used to instantiate a
    CLIP vision encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the vision encoder of the CLIP
    [openai/clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        projection_dim (`int`, *optional*, defaults to 512):
            Dimensionality of text and vision projection layers.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 32):
            The size (resolution) of each patch.
        hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_factor (`float`, *optional*, defaults to 1.0):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).
        mm_vision_select_feature (`str`, *optional*, defaults to `"patch"`):
            The feature to select from the vision encoder output. Can be one of `"patch"` or `"cls_patch"`.
        mm_vision_select_layer (`int`, *optional*, defaults to `-2`):
            The layer to select from the vision encoder output.

    Example:

    ```python
    >>> from transformers import CLIPVisionConfig, CLIPVisionModel

    >>> # Initializing a CLIPVisionConfig with openai/clip-vit-base-patch32 style configuration
    >>> configuration = CLIPVisionConfig()

    >>> # Initializing a CLIPVisionModel (with random weights) from the openai/clip-vit-base-patch32 style configuration
    >>> model = CLIPVisionModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "llava_phi_clip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        mm_vision_select_feature="patch",
        mm_vision_select_layer=-2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        # LLaVA-specific additions on top of the CLIP vision config:
        # which hidden layer to tap and which tokens (patch / cls+patch) to keep.
        self.mm_vision_select_feature = mm_vision_select_feature
        self.mm_vision_select_layer = mm_vision_select_layer

    @classmethod
    def from_pretrained(
        cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
    ) -> "PretrainedConfig":
        """Load this config, unwrapping the nested `vision_config` when the
        checkpoint's config belongs to the composite parent model."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(
            pretrained_model_name_or_path, **kwargs
        )

        # get the vision config dict if we are loading from CLIPConfig
        # NOTE(review): the parent model_type string "llava_phi-phi" does not
        # match LlavaPhiConfig.model_type ("llava_phi") — confirm intended.
        if config_dict.get("model_type") == "llava_phi-phi":
            config_dict = config_dict["vision_config"]

        if (
            "model_type" in config_dict
            and hasattr(cls, "model_type")
            and config_dict["model_type"] != cls.model_type
        ):
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
132
+
133
+
134
class ProjectorConfig(PretrainedConfig):
    """Configuration for the multimodal projector that maps vision-tower
    features (`mm_hidden_size`) into the language model's hidden space
    (`hidden_size`)."""

    model_type = "llava_phi_projector"

    def __init__(
        self, mm_projector_type="linear", mm_hidden_size=768, hidden_size=2560, **kwargs
    ):
        # Record projector hyper-parameters before delegating the generic
        # config bookkeeping to PretrainedConfig.
        self.mm_projector_type = mm_projector_type
        self.mm_hidden_size = mm_hidden_size
        self.hidden_size = hidden_size
        super().__init__(**kwargs)

    @classmethod
    def from_pretrained(
        cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
    ) -> "PretrainedConfig":
        """Load this config, unwrapping the nested `projector_config` when the
        checkpoint's config belongs to the composite parent model."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(
            pretrained_model_name_or_path, **kwargs
        )

        # get the vision config dict if we are loading from CLIPConfig
        # NOTE(review): "llava_phi-phi" does not match LlavaPhiConfig.model_type
        # ("llava_phi") — confirm intended.
        if config_dict.get("model_type") == "llava_phi-phi":
            config_dict = config_dict["projector_config"]

        loaded_type = config_dict.get("model_type")
        mismatched = (
            loaded_type is not None
            and hasattr(cls, "model_type")
            and loaded_type != cls.model_type
        )
        if mismatched:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
170
+
171
+
172
# Default multimodal settings used when a checkpoint carries no vision_config:
# a CLIP-style vision tower plus a projector, serialized as plain dicts.
# NOTE(review): this is a mutable module-level dict; configs that alias it
# (see LlavaPhiConfig) share state — mutating one would affect all.
DEFAULT_VISUAL_CONFIG = {
    "vision_tower": LlavaPhiVisionConfig().to_dict(),
    "mm_projector": ProjectorConfig().to_dict(),
}
176
+
177
+
178
class LlavaPhiConfig(PhiConfig):
    """Phi language-model configuration extended with a `vision_config` dict
    holding the vision-tower and mm-projector sub-configurations.

    Args:
        vision_config: Optional dict with "vision_tower" and "mm_projector"
            sub-dicts; when omitted, defaults equivalent to
            ``DEFAULT_VISUAL_CONFIG`` are used.
        **kwargs: Forwarded to ``PhiConfig``.
    """

    model_type = "llava_phi"

    def __init__(self, vision_config=None, **kwargs):
        if vision_config is None:
            # BUGFIX: build a fresh dict instead of aliasing the shared
            # module-level DEFAULT_VISUAL_CONFIG, so mutating one config
            # instance cannot leak into every other instance.
            self.vision_config = {
                "vision_tower": LlavaPhiVisionConfig().to_dict(),
                "mm_projector": ProjectorConfig().to_dict(),
            }
        else:
            self.vision_config = vision_config

        super().__init__(**kwargs)
188
+
189
+
190
if __name__ == "__main__":
    # Smoke check: print the default vision-tower configuration.
    print(LlavaPhiVisionConfig())
inference/model/language_model/llava_phi.py ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from typing import List, Optional, Tuple, Union
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+ from torch.nn import CrossEntropyLoss
7
+
8
+ from transformers import AutoConfig, AutoModelForCausalLM, \
9
+ PhiModel, PhiPreTrainedModel
10
+
11
+ from transformers.modeling_outputs import CausalLMOutputWithPast
12
+ from ..llava_arch import LlavaMetaModel, LlavaMetaForCausalLM
13
+ from transformers.utils import logging
14
+ from .configuration_llava_phi import LlavaPhiConfig
15
+
16
+ logger = logging.get_logger(__name__)
17
+
18
+
19
class LLavaPhiModel(LlavaMetaModel, PhiModel):
    """Phi transformer backbone extended with the LLaVA vision tower and
    multimodal projector (contributed by the LlavaMetaModel mixin)."""

    config_class = LlavaPhiConfig

    def __init__(self, config):
        # Zero-argument super() follows the same MRO as the original explicit
        # form: LlavaMetaModel.__init__ builds the vision modules, then
        # PhiModel.__init__ builds the language transformer.
        super().__init__(config)
24
+
25
+
26
class LlavaPhiForCausalLM(PhiPreTrainedModel, LlavaMetaForCausalLM):
    """Phi causal LM with LLaVA-style multimodal input handling.

    `forward` first splices projected image features into the embedding
    sequence via `prepare_inputs_labels_for_multimodal` (inherited from
    LlavaMetaForCausalLM), then runs the standard causal-LM pass with a
    shifted cross-entropy loss.
    """

    config_class = LlavaPhiConfig

    def __init__(self, config):
        # NOTE(review): super(PhiPreTrainedModel, self) starts MRO lookup
        # *after* PhiPreTrainedModel, skipping its __init__ — confirm this
        # skip is intentional.
        super(PhiPreTrainedModel, self).__init__(config)
        self.model = LLavaPhiModel(config)
        # Biased LM head (bias=True), matching Phi's head layout.
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=True)

        # Initialize weights and apply final processing
        self.post_init()

    def get_model(self):
        # Accessor required by LlavaMetaForCausalLM to reach the backbone
        # (embed_tokens, vision tower, mm projector).
        return self.model

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        images: Optional[torch.FloatTensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        """Run a (possibly multimodal) causal-LM forward pass.

        When `images` is given, image features replace the image-token
        placeholders in the input and `input_ids` is converted to
        `inputs_embeds`; otherwise this behaves like a plain Phi LM.
        Returns logits plus an optional next-token cross-entropy loss.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Splice image features into the token sequence; may return
        # input_ids=None with inputs_embeds populated instead.
        input_ids, attention_mask, past_key_values, inputs_embeds, labels = self.prepare_inputs_labels_for_multimodal(
            input_ids, attention_mask, past_key_values, labels, images)

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict
        )

        hidden_states = outputs[0]
        logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model/pipeline parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
    ):
        """Build the per-step generation inputs, forwarding `images` so the
        multimodal splicing runs on the first step."""
        if past_key_values:
            # With a cache, only the newest token needs to be fed.
            input_ids = input_ids[:, -1:]

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        model_inputs.update(
            {
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "attention_mask": attention_mask,
                "images": kwargs.get("images", None),
            }
        )
        return model_inputs
123
+
124
+
125
+ AutoConfig.register("llava_phi", LlavaPhiConfig)
126
+ AutoModelForCausalLM.register(LlavaPhiConfig, LlavaPhiForCausalLM)
inference/model/llava_arch.py ADDED
@@ -0,0 +1,330 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2023 Haotian Liu
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ from abc import ABC, abstractmethod
17
+
18
+ import torch
19
+
20
+ from .multimodal_encoder.clip_encoder import CLIPVisionTower
21
+ from .multimodal_projector.builder import build_vision_projector
22
+ from .language_model.configuration_llava_phi import (
23
+ LlavaPhiConfig,
24
+ LlavaPhiVisionConfig,
25
+ ProjectorConfig,
26
+ )
27
+
28
+ # from llava_phi.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
29
+ IGNORE_INDEX = -100
30
+ IMAGE_TOKEN_INDEX = -200
31
+ DEFAULT_IMAGE_TOKEN = "<image>"
32
+ DEFAULT_IMAGE_PATCH_TOKEN = "<im_patch>"
33
+ DEFAULT_IM_START_TOKEN = "<im_start>"
34
+ DEFAULT_IM_END_TOKEN = "<im_end>"
35
+
36
+
37
class LlavaMetaModel:
    """Mixin that attaches a CLIP vision tower and a multimodal projector to a
    language-model backbone (cooperative __init__ via the MRO)."""

    def __init__(self, config):
        # Let the concrete backbone (e.g. PhiModel) initialize first.
        super(LlavaMetaModel, self).__init__(config)
        visual_cfg = config.vision_config
        tower_config = LlavaPhiVisionConfig(**visual_cfg["vision_tower"])
        projector_config = ProjectorConfig(**visual_cfg["mm_projector"])
        self.vision_tower = CLIPVisionTower(tower_config)
        self.mm_projector = build_vision_projector(projector_config)

    def get_vision_tower(self):
        """Return the vision tower, unwrapping a one-element list if present."""
        tower = getattr(self, "vision_tower", None)
        return tower[0] if type(tower) is list else tower
52
+
53
+
54
class LlavaMetaForCausalLM(ABC):
    """Mixin giving a causal LM the ability to splice projected image features
    into its token-embedding sequence at IMAGE_TOKEN_INDEX placeholders."""

    @abstractmethod
    def get_model(self):
        # Concrete subclasses return the backbone that owns embed_tokens,
        # the vision tower, and the mm projector.
        pass

    def get_vision_tower(self):
        return self.get_model().get_vision_tower()

    def encode_images(self, images):
        """Run images through the vision tower, then project the features into
        the language model's hidden space."""
        image_features = self.get_model().get_vision_tower()(images)
        image_features = self.get_model().mm_projector(image_features)
        return image_features

    def prepare_inputs_labels_for_multimodal(
        self, input_ids, attention_mask, past_key_values, labels, images
    ):
        """Replace each IMAGE_TOKEN_INDEX placeholder in `input_ids` with the
        corresponding image's projected features, returning embeddings instead
        of token ids.

        Returns a 5-tuple ``(input_ids, attention_mask, past_key_values,
        inputs_embeds, labels)``. In the multimodal path input_ids comes back
        as None and inputs_embeds is populated; label positions covering image
        features are filled with IGNORE_INDEX; ragged batches are right-padded
        with zero embeddings and the attention mask is extended to match.
        """
        vision_tower = self.get_vision_tower()
        # Fast path: text-only model/input, or a single-token decode step.
        if vision_tower is None or images is None or input_ids.shape[1] == 1:
            if (
                past_key_values is not None
                and vision_tower is not None
                and images is not None
                and input_ids.shape[1] == 1
            ):
                # Mid-generation with a cache: rebuild a full-length all-ones
                # mask covering the cached sequence plus the new token.
                attention_mask = torch.ones(
                    (attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1),
                    dtype=attention_mask.dtype,
                    device=attention_mask.device,
                )
            return input_ids, attention_mask, past_key_values, None, labels

        # Multi-image inputs (list, or 5-D batch x num_images x C x H x W):
        # encode all images in one pass, then split back per sample.
        if type(images) is list or images.ndim == 5:
            concat_images = torch.cat([image for image in images], dim=0)
            image_features = self.encode_images(concat_images)
            split_sizes = [image.shape[0] for image in images]
            image_features = torch.split(image_features, split_sizes, dim=0)
            image_features = [x.flatten(0, 1) for x in image_features]
        else:
            image_features = self.encode_images(images)

        new_input_embeds = []
        new_labels = [] if labels is not None else None
        cur_image_idx = 0
        for batch_idx, cur_input_ids in enumerate(input_ids):
            if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0:
                # multimodal LLM, but the current sample is not multimodal
                # FIXME: this is a hacky fix, for deepspeed zero3 to work
                # (touches the image features with a zero-length slice so the
                # vision parameters still participate in the graph).
                half_len = cur_input_ids.shape[0] // 2
                cur_image_features = image_features[cur_image_idx]
                cur_input_embeds_1 = self.get_model().embed_tokens(
                    cur_input_ids[:half_len]
                )
                cur_input_embeds_2 = self.get_model().embed_tokens(
                    cur_input_ids[half_len:]
                )
                cur_input_embeds = torch.cat(
                    [cur_input_embeds_1, cur_image_features[0:0], cur_input_embeds_2],
                    dim=0,
                )
                new_input_embeds.append(cur_input_embeds)
                if labels is not None:
                    new_labels.append(labels[batch_idx])
                cur_image_idx += 1
                continue
            # Consume the sample's image placeholders left-to-right.
            image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]
            cur_new_input_embeds = []
            if labels is not None:
                cur_labels = labels[batch_idx]
                cur_new_labels = []
                assert cur_labels.shape == cur_input_ids.shape
            while image_token_indices.numel() > 0:
                cur_image_features = image_features[cur_image_idx]
                image_token_start = image_token_indices[0]
                if getattr(self.config, "tune_mm_mlp_adapter", False) and getattr(
                    self.config, "mm_use_im_start_end", False
                ):
                    # Adapter-only tuning with <im_start>/<im_end> wrappers:
                    # detach the plain text embeddings so gradients flow only
                    # through the wrapper tokens and the image features.
                    cur_new_input_embeds.append(
                        self.get_model()
                        .embed_tokens(cur_input_ids[: image_token_start - 1])
                        .detach()
                    )
                    cur_new_input_embeds.append(
                        self.get_model().embed_tokens(
                            cur_input_ids[image_token_start - 1 : image_token_start]
                        )
                    )
                    cur_new_input_embeds.append(cur_image_features)
                    cur_new_input_embeds.append(
                        self.get_model().embed_tokens(
                            cur_input_ids[image_token_start + 1 : image_token_start + 2]
                        )
                    )
                    if labels is not None:
                        cur_new_labels.append(cur_labels[:image_token_start])
                        # Image-feature positions are excluded from the loss.
                        cur_new_labels.append(
                            torch.full(
                                (cur_image_features.shape[0],),
                                IGNORE_INDEX,
                                device=labels.device,
                                dtype=labels.dtype,
                            )
                        )
                        cur_new_labels.append(
                            cur_labels[image_token_start : image_token_start + 1]
                        )
                        cur_labels = cur_labels[image_token_start + 2 :]
                else:
                    # Standard path: text before the placeholder, then the
                    # image features in its place.
                    cur_new_input_embeds.append(
                        self.get_model().embed_tokens(cur_input_ids[:image_token_start])
                    )
                    cur_new_input_embeds.append(cur_image_features)
                    if labels is not None:
                        cur_new_labels.append(cur_labels[:image_token_start])
                        cur_new_labels.append(
                            torch.full(
                                (cur_image_features.shape[0],),
                                IGNORE_INDEX,
                                device=labels.device,
                                dtype=labels.dtype,
                            )
                        )
                        cur_labels = cur_labels[image_token_start + 1 :]
                cur_image_idx += 1
                # Advance past the consumed placeholder (and its <im_end> in
                # the wrapper-token configuration).
                if getattr(self.config, "tune_mm_mlp_adapter", False) and getattr(
                    self.config, "mm_use_im_start_end", False
                ):
                    cur_input_ids = cur_input_ids[image_token_start + 2 :]
                else:
                    cur_input_ids = cur_input_ids[image_token_start + 1 :]
                image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]
            if cur_input_ids.numel() > 0:
                # Trailing text after the last image placeholder.
                if getattr(self.config, "tune_mm_mlp_adapter", False) and getattr(
                    self.config, "mm_use_im_start_end", False
                ):
                    cur_new_input_embeds.append(
                        self.get_model().embed_tokens(cur_input_ids).detach()
                    )
                else:
                    cur_new_input_embeds.append(
                        self.get_model().embed_tokens(cur_input_ids)
                    )
                if labels is not None:
                    cur_new_labels.append(cur_labels)
            cur_new_input_embeds = [
                x.to(device=self.device) for x in cur_new_input_embeds
            ]
            cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)
            new_input_embeds.append(cur_new_input_embeds)
            if labels is not None:
                cur_new_labels = torch.cat(cur_new_labels, dim=0)
                new_labels.append(cur_new_labels)

        # Ragged batch: right-pad every sample to the longest spliced length.
        if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds):
            max_len = max(x.shape[0] for x in new_input_embeds)

            new_input_embeds_align = []
            for cur_new_embed in new_input_embeds:
                cur_new_embed = torch.cat(
                    (
                        cur_new_embed,
                        torch.zeros(
                            (max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]),
                            dtype=cur_new_embed.dtype,
                            device=cur_new_embed.device,
                        ),
                    ),
                    dim=0,
                )
                new_input_embeds_align.append(cur_new_embed)
            new_input_embeds = torch.stack(new_input_embeds_align, dim=0)

            if labels is not None:
                new_labels_align = []
                # Keep the pre-padding labels; needed below to size the mask.
                _new_labels = new_labels
                for cur_new_label in new_labels:
                    cur_new_label = torch.cat(
                        (
                            cur_new_label,
                            torch.full(
                                (max_len - cur_new_label.shape[0],),
                                IGNORE_INDEX,
                                dtype=cur_new_label.dtype,
                                device=cur_new_label.device,
                            ),
                        ),
                        dim=0,
                    )
                    new_labels_align.append(cur_new_label)
                new_labels = torch.stack(new_labels_align, dim=0)

            # NOTE(review): this branch reads `_new_labels` and `labels.shape`,
            # which only exist when labels is not None — a ragged batch with
            # attention_mask but no labels (e.g. batched generation) would
            # raise here. Confirm whether that path can occur.
            if attention_mask is not None:
                new_attention_mask = []
                for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(
                    attention_mask, _new_labels, new_labels
                ):
                    # Left side: positions added by image splicing (attended);
                    # right side: padding up to max_len (masked out).
                    new_attn_mask_pad_left = torch.full(
                        (cur_new_labels.shape[0] - labels.shape[1],),
                        True,
                        dtype=attention_mask.dtype,
                        device=attention_mask.device,
                    )
                    new_attn_mask_pad_right = torch.full(
                        (cur_new_labels_align.shape[0] - cur_new_labels.shape[0],),
                        False,
                        dtype=attention_mask.dtype,
                        device=attention_mask.device,
                    )
                    cur_new_attention_mask = torch.cat(
                        (
                            new_attn_mask_pad_left,
                            cur_attention_mask,
                            new_attn_mask_pad_right,
                        ),
                        dim=0,
                    )
                    new_attention_mask.append(cur_new_attention_mask)
                attention_mask = torch.stack(new_attention_mask, dim=0)
                assert attention_mask.shape == new_labels.shape
        else:
            # Uniform lengths: stack directly and grow the mask on the left
            # by the number of positions the image features added.
            new_input_embeds = torch.stack(new_input_embeds, dim=0)
            if labels is not None:
                new_labels = torch.stack(new_labels, dim=0)

            if attention_mask is not None:
                new_attn_mask_pad_left = torch.full(
                    (
                        attention_mask.shape[0],
                        new_input_embeds.shape[1] - input_ids.shape[1],
                    ),
                    True,
                    dtype=attention_mask.dtype,
                    device=attention_mask.device,
                )
                attention_mask = torch.cat(
                    (new_attn_mask_pad_left, attention_mask), dim=1
                )
                assert attention_mask.shape == new_input_embeds.shape[:2]

        return None, attention_mask, past_key_values, new_input_embeds, new_labels

    def initialize_vision_tokenizer(self, model_args, tokenizer):
        """Add the image special tokens to `tokenizer`, resize the embedding
        tables, and initialize any new rows with the mean of the old ones."""
        if model_args.mm_use_im_patch_token:
            tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
            self.resize_token_embeddings(len(tokenizer))

        if model_args.mm_use_im_start_end:
            num_new_tokens = tokenizer.add_tokens(
                [DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True
            )
            self.resize_token_embeddings(len(tokenizer))

            if num_new_tokens > 0:
                input_embeddings = self.get_input_embeddings().weight.data
                output_embeddings = self.get_output_embeddings().weight.data

                # Initialize new token rows with the mean of existing rows —
                # a better starting point than random init.
                input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(
                    dim=0, keepdim=True
                )
                output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(
                    dim=0, keepdim=True
                )

                input_embeddings[-num_new_tokens:] = input_embeddings_avg
                output_embeddings[-num_new_tokens:] = output_embeddings_avg

            if model_args.tune_mm_mlp_adapter:
                # Only the input embeddings train alongside the adapter.
                for p in self.get_input_embeddings().parameters():
                    p.requires_grad = True
                for p in self.get_output_embeddings().parameters():
                    p.requires_grad = False

        elif model_args.mm_use_im_patch_token:
            if model_args.tune_mm_mlp_adapter:
                # Patch-token-only setup: freeze both embedding tables.
                for p in self.get_input_embeddings().parameters():
                    p.requires_grad = False
                for p in self.get_output_embeddings().parameters():
                    p.requires_grad = False
inference/model/multimodal_encoder/clip_encoder.py ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from abc import ABC
2
+
3
+ import torch
4
+ import torch.nn as nn
5
+
6
+ from transformers import CLIPPreTrainedModel, CLIPVisionConfig
7
+ from transformers.models.clip.modeling_clip import CLIPVisionTransformer
8
+ from inference.model.language_model.configuration_llava_phi import LlavaPhiVisionConfig
9
+
10
+
11
class CLIPVisionTower(CLIPPreTrainedModel):
    """CLIP vision encoder that exposes intermediate-layer patch features.

    Which hidden layer to read (``mm_vision_select_layer``) and whether to keep
    the CLS token (``mm_vision_select_feature``: "patch" drops it, "cls_patch"
    keeps it) come from the LlavaPhi vision config.
    """

    config_class = LlavaPhiVisionConfig

    def __init__(self, config):
        super().__init__(config)

        self.vision_model = CLIPVisionTransformer(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.vision_model.embeddings.patch_embedding

    def feature_select(self, image_forward_outs):
        """Pick the configured hidden layer and optionally strip the CLS token."""
        image_features = image_forward_outs.hidden_states[
            self.config.mm_vision_select_layer
        ]
        if self.config.mm_vision_select_feature == "patch":
            # Drop the leading CLS embedding; keep only patch tokens.
            image_features = image_features[:, 1:]
        elif self.config.mm_vision_select_feature == "cls_patch":
            image_features = image_features
        else:
            raise ValueError(
                f"Unexpected select feature: {self.config.mm_vision_select_feature}"
            )
        return image_features

    def forward(self, images):
        """Encode a batched tensor, or a list of single images, into features.

        List inputs are encoded one by one (each unsqueezed to batch size 1);
        the returned features are cast back to each input's original dtype.
        """
        if type(images) is list:
            image_features = []
            for image in images:
                image_forward_out = self.vision_model(
                    image.to(device=self.device, dtype=self.dtype).unsqueeze(0),
                    output_hidden_states=True,
                )
                image_feature = self.feature_select(image_forward_out).to(image.dtype)
                image_features.append(image_feature)
        else:
            image_forward_outs = self.vision_model(
                images.to(device=self.device, dtype=self.dtype),
                output_hidden_states=True,
            )
            image_features = self.feature_select(image_forward_outs).to(images.dtype)

        return image_features

    @property
    def dummy_feature(self):
        # Placeholder feature used when no real image is available.
        return torch.zeros(1, self.hidden_size, device=self.device, dtype=self.dtype)

    @property
    def dtype(self):
        # next() peeks at one parameter without materializing the whole list.
        return next(self.vision_model.parameters()).dtype

    @property
    def device(self):
        return next(self.vision_model.parameters()).device

    @property
    def hidden_size(self):
        return self.config.hidden_size

    @property
    def num_patches(self):
        return (self.config.image_size // self.config.patch_size) ** 2
76
+
77
+
78
# Ad-hoc smoke test: load a CLIP vision config, mirror it into a
# LlavaPhiVisionConfig, and instantiate the tower.
if __name__ == "__main__":
    # NOTE(review): hard-coded machine-local checkpoint path — will only work on
    # the original author's box.
    clip_config = CLIPVisionConfig.from_pretrained(
        "/data/private/zhumj/GPTcode/mm-phi/openai/clip-vit-large-patch14-336"
    )
    print("################ clip_config ##############")
    print(clip_config)
    phi_vis_config = LlavaPhiVisionConfig(**clip_config.to_dict())
    print("################ phi_vis_config ##############")
    print(phi_vis_config)

    model = CLIPVisionTower(clip_config)
    # print(list(model.vision_model.parameters())[0].dtype)
inference/model/multimodal_projector/builder.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+ import re
4
+
5
+
6
class IdentityMap(nn.Module):
    """No-op projector: returns its input unchanged.

    Extra positional/keyword arguments are accepted (and ignored) so it stays
    call-compatible with real projector modules.
    """

    def __init__(self):
        super().__init__()

    def forward(self, x, *args, **kwargs):
        return x

    @property
    def config(self):
        # Marker persisted in checkpoints so the same projector is rebuilt on load.
        return {"mm_projector_type": "identity"}
16
+
17
+
18
class SimpleResBlock(nn.Module):
    """Pre-norm residual MLP block: out = norm(x) + MLP(norm(x))."""

    def __init__(self, channels):
        super().__init__()
        # Attribute names (pre_norm / proj) are part of the checkpoint layout.
        self.pre_norm = nn.LayerNorm(channels)
        layers = [
            nn.Linear(channels, channels),
            nn.GELU(),
            nn.Linear(channels, channels),
        ]
        self.proj = nn.Sequential(*layers)

    def forward(self, x):
        normed = self.pre_norm(x)
        return normed + self.proj(normed)
30
+
31
+
32
def build_vision_projector(config):
    """Build the vision->language projection module named by *config*.

    ``config.mm_projector_type`` (default "linear") selects:
      * "linear"      -> single Linear(mm_hidden_size -> hidden_size)
      * "mlpNx_gelu"  -> N Linear layers with GELU between them
      * "identity"    -> pass-through IdentityMap
    Raises ValueError for anything else.
    """
    projector_type = getattr(config, "mm_projector_type", "linear")

    if projector_type == "linear":
        return nn.Linear(config.mm_hidden_size, config.hidden_size)

    if projector_type == "identity":
        return IdentityMap()

    mlp_match = re.match(r"^mlp(\d+)x_gelu$", projector_type)
    if mlp_match is not None:
        depth = int(mlp_match.group(1))
        layers = [nn.Linear(config.mm_hidden_size, config.hidden_size)]
        for _ in range(depth - 1):
            layers.append(nn.GELU())
            layers.append(nn.Linear(config.hidden_size, config.hidden_size))
        return nn.Sequential(*layers)

    raise ValueError(f"Unknown projector type: {projector_type}")
llava-phi/llava_phi/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .model import LlavaPhiForCausalLM
llava-phi/llava_phi/constants.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Serving heartbeat settings (presumably seconds — TODO confirm against the
# controller/worker that consumes them).
CONTROLLER_HEART_BEAT_EXPIRATION = 30
WORKER_HEART_BEAT_INTERVAL = 15

LOGDIR = "."

# Model Constants
IGNORE_INDEX = -100  # label positions excluded from the loss
IMAGE_TOKEN_INDEX = -200  # sentinel id marking where image features are spliced in
DEFAULT_IMAGE_TOKEN = "<image>"
DEFAULT_IMAGE_PATCH_TOKEN = "<im_patch>"
DEFAULT_IM_START_TOKEN = "<im_start>"
DEFAULT_IM_END_TOKEN = "<im_end>"
llava-phi/llava_phi/conversation.py ADDED
@@ -0,0 +1,224 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import dataclasses
2
+ from enum import auto, Enum
3
+ from typing import List, Tuple
4
+
5
+
6
class SeparatorStyle(Enum):
    """Different separator style."""
    # Only SINGLE, TWO and PLAIN are handled by Conversation.get_prompt below;
    # MPT and LLAMA_2 currently raise ValueError there.
    SINGLE = auto()
    TWO = auto()
    MPT = auto()
    PLAIN = auto()
    LLAMA_2 = auto()
13
+
14
+
15
@dataclasses.dataclass
class Conversation:
    """A class that keeps all conversation history."""
    system: str                 # system prompt prepended to every rendered prompt
    roles: List[str]            # (user role name, assistant role name)
    messages: List[List[str]]   # [role, message]; message may be (text, image, mode)
    offset: int                 # messages before this index are few-shot context
    sep_style: SeparatorStyle = SeparatorStyle.SINGLE
    sep: str = "###"
    sep2: str = None
    version: str = "Unknown"

    skip_next: bool = False

    def get_prompt(self):
        """Render the conversation into a single prompt string per sep_style."""
        messages = self.messages
        if len(messages) > 0 and type(messages[0][1]) is tuple:
            # First message carries an image tuple: normalize it to plain text
            # with an <image> placeholder (mmtag versions wrap it in <Image> tags).
            messages = self.messages.copy()
            init_role, init_msg = messages[0].copy()
            init_msg = init_msg[0].replace("<image>", "").strip()
            if 'mmtag' in self.version:
                messages[0] = (init_role, init_msg)
                messages.insert(0, (self.roles[0], "<Image><image></Image>"))
                messages.insert(1, (self.roles[1], "Received."))
            else:
                messages[0] = (init_role, "<image>\n" + init_msg)

        if self.sep_style == SeparatorStyle.SINGLE:
            ret = self.system + self.sep
            for role, message in messages:
                if message:
                    if type(message) is tuple:
                        message, _, _ = message
                    ret += role + ": " + message + self.sep
                else:
                    # Empty message: open the role's turn for generation.
                    ret += role + ":"
        elif self.sep_style == SeparatorStyle.TWO:
            # Alternates sep (after user turns) and sep2 (after assistant turns).
            seps = [self.sep, self.sep2]
            ret = self.system + seps[0]
            for i, (role, message) in enumerate(messages):
                if message:
                    if type(message) is tuple:
                        message, _, _ = message
                    ret += role + ": " + message + seps[i % 2]
                else:
                    ret += role + ":"
        elif self.sep_style == SeparatorStyle.PLAIN:
            # No role labels at all; used for pretraining-style data.
            seps = [self.sep, self.sep2]
            ret = self.system
            for i, (role, message) in enumerate(messages):
                if message:
                    if type(message) is tuple:
                        message, _, _ = message
                    ret += message + seps[i % 2]
                else:
                    ret += ""
        else:
            raise ValueError(f"Invalid style: {self.sep_style}")

        return ret

    def append_message(self, role, message):
        self.messages.append([role, message])

    def get_images(self, return_pil=False):
        """Collect images from user turns, resized/padded per their process mode.

        Returns PIL images when *return_pil* is True, else base64 PNG strings.
        """
        images = []
        for i, (role, msg) in enumerate(self.messages[self.offset:]):
            if i % 2 == 0:  # user turns only
                if type(msg) is tuple:
                    import base64
                    from io import BytesIO
                    from PIL import Image
                    msg, image, image_process_mode = msg
                    if image_process_mode == "Pad":
                        def expand2square(pil_img, background_color=(122, 116, 104)):
                            # Pad the shorter side so the image becomes square.
                            width, height = pil_img.size
                            if width == height:
                                return pil_img
                            elif width > height:
                                result = Image.new(pil_img.mode, (width, width), background_color)
                                result.paste(pil_img, (0, (width - height) // 2))
                                return result
                            else:
                                result = Image.new(pil_img.mode, (height, height), background_color)
                                result.paste(pil_img, ((height - width) // 2, 0))
                                return result
                        image = expand2square(image)
                    elif image_process_mode in ["Default", "Crop"]:
                        pass
                    elif image_process_mode == "Resize":
                        image = image.resize((336, 336))
                    else:
                        raise ValueError(f"Invalid image_process_mode: {image_process_mode}")
                    # Downscale so the short edge is at most 400px and the long
                    # edge at most 800px, preserving aspect ratio.
                    max_hw, min_hw = max(image.size), min(image.size)
                    aspect_ratio = max_hw / min_hw
                    max_len, min_len = 800, 400
                    shortest_edge = int(min(max_len / aspect_ratio, min_len, min_hw))
                    longest_edge = int(shortest_edge * aspect_ratio)
                    W, H = image.size
                    if longest_edge != max(image.size):
                        if H > W:
                            H, W = longest_edge, shortest_edge
                        else:
                            H, W = shortest_edge, longest_edge
                        image = image.resize((W, H))
                    if return_pil:
                        images.append(image)
                    else:
                        buffered = BytesIO()
                        image.save(buffered, format="PNG")
                        img_b64_str = base64.b64encode(buffered.getvalue()).decode()
                        images.append(img_b64_str)
        return images

    def to_gradio_chatbot(self):
        """Convert history into gradio chatbot pairs, inlining images as HTML."""
        ret = []
        for i, (role, msg) in enumerate(self.messages[self.offset:]):
            if i % 2 == 0:
                if type(msg) is tuple:
                    import base64
                    from io import BytesIO
                    msg, image, image_process_mode = msg
                    max_hw, min_hw = max(image.size), min(image.size)
                    aspect_ratio = max_hw / min_hw
                    max_len, min_len = 800, 400
                    shortest_edge = int(min(max_len / aspect_ratio, min_len, min_hw))
                    longest_edge = int(shortest_edge * aspect_ratio)
                    W, H = image.size
                    if H > W:
                        H, W = longest_edge, shortest_edge
                    else:
                        H, W = shortest_edge, longest_edge
                    image = image.resize((W, H))
                    buffered = BytesIO()
                    # NOTE(review): encoded as JPEG but declared image/png in the
                    # data URI below — browsers sniff the real type, but the mime
                    # label looks wrong; confirm and align.
                    image.save(buffered, format="JPEG")
                    img_b64_str = base64.b64encode(buffered.getvalue()).decode()
                    img_str = f'<img src="data:image/png;base64,{img_b64_str}" alt="user upload image" />'
                    msg = img_str + msg.replace('<image>', '').strip()
                    ret.append([msg, None])
                else:
                    ret.append([msg, None])
            else:
                # Assistant turn fills the reply slot of the last pair.
                ret[-1][-1] = msg
        return ret

    def copy(self):
        """Return a deep-enough copy (message pairs re-listed) of this template."""
        return Conversation(
            system=self.system,
            roles=self.roles,
            messages=[[x, y] for x, y in self.messages],
            offset=self.offset,
            sep_style=self.sep_style,
            sep=self.sep,
            sep2=self.sep2,
            version=self.version)

    def dict(self):
        """Serialize to a plain dict; image tuples are reduced to their text."""
        if len(self.get_images()) > 0:
            return {
                "system": self.system,
                "roles": self.roles,
                "messages": [[x, y[0] if type(y) is tuple else y] for x, y in self.messages],
                "offset": self.offset,
                "sep": self.sep,
                "sep2": self.sep2,
            }
        return {
            "system": self.system,
            "roles": self.roles,
            "messages": self.messages,
            "offset": self.offset,
            "sep": self.sep,
            "sep2": self.sep2,
        }
189
+
190
+
191
# Default chat template for the phi-2 backbone (two-separator style; assistant
# turns are terminated by the model's EOS token).
conv_phi_v0 = Conversation(
    system="A chat between a curious user and an artificial intelligence assistant. "
           "The assistant gives helpful, detailed, and polite answers to the user's questions.",
    roles=("USER", "ASSISTANT"),
    version="v0",
    messages=(),
    offset=0,
    sep_style=SeparatorStyle.TWO,
    sep=" ",
    sep2="<|endoftext|>",
)

# Bare template for caption-style pretraining data (no roles, no system prompt).
# NOTE(review): sep2 is left as None; PLAIN rendering alternates sep/sep2, so a
# second message would concatenate None — confirm only single-turn data uses this.
conv_llava_plain = Conversation(
    system="",
    roles=("", ""),
    messages=(
    ),
    offset=0,
    sep_style=SeparatorStyle.PLAIN,
    sep="\n",
)

default_conversation = conv_phi_v0
# Lookup table used by callers that select a template by name.
conv_templates = {
    "default": conv_phi_v0,
    "v0": conv_phi_v0,
    "phi-2_v0": conv_phi_v0,

    "plain": conv_llava_plain,
}


if __name__ == "__main__":
    print(default_conversation.get_prompt())
llava-phi/llava_phi/eval/eval_gpt_review.py ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import json
3
+ import os
4
+
5
+ import openai
6
+ import tqdm
7
+ import ray
8
+ import time
9
+
10
+ NUM_SECONDS_TO_SLEEP = 3
11
+
12
@ray.remote(num_cpus=4)
def get_eval(content: str, max_tokens: int):
    """Ask GPT-4 to review *content*, retrying until the request succeeds.

    Runs as a ray task so many reviews can be requested concurrently.
    """
    while True:
        try:
            response = openai.ChatCompletion.create(
                model='gpt-4',
                messages=[{
                    'role': 'system',
                    'content': 'You are a helpful and precise assistant for checking the quality of the answer.'
                }, {
                    'role': 'user',
                    'content': content,
                }],
                temperature=0.2,  # TODO: figure out which temperature is best for evaluation
                max_tokens=max_tokens,
            )
            break
        except openai.error.RateLimitError:
            # Fix: previously this branch fell through with `pass` and retried
            # immediately, hammering the API in a tight loop. Back off first.
            time.sleep(NUM_SECONDS_TO_SLEEP)
        except Exception as e:
            print(e)
            time.sleep(NUM_SECONDS_TO_SLEEP)

    print('success!')
    return response['choices'][0]['message']['content']
37
+
38
+
39
def parse_score(review):
    """Parse the "score1 score2" pair from the first line of a GPT review.

    Returns [score1, score2] as floats, or [-1, -1] when parsing fails.
    """
    try:
        score_pair = review.split('\n')[0]
        score_pair = score_pair.replace(',', ' ')
        # Fix: split() (whitespace split) instead of split(' ') — "8, 9" becomes
        # "8  9" after the replace, and split(' ') would yield an empty token,
        # wrongly rejecting a valid score pair.
        sp = score_pair.split()
        if len(sp) == 2:
            return [float(sp[0]), float(sp[1])]
        else:
            print('error', review)
            return [-1, -1]
    except Exception as e:
        print(e)
        print('error', review)
        return [-1, -1]
53
+
54
+
55
# CLI driver: pair up question/answer JSONL files, fan the GPT-4 review
# requests out via ray, then write one review JSON per line to --output.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.')
    parser.add_argument('-q', '--question')
    # parser.add_argument('-a', '--answer')
    parser.add_argument('-a', '--answer-list', nargs='+', default=[])
    parser.add_argument('-r', '--rule')
    parser.add_argument('-o', '--output')
    parser.add_argument('--max-tokens', type=int, default=1024, help='maximum number of tokens produced in the output')
    args = parser.parse_args()

    ray.init()

    f_q = open(os.path.expanduser(args.question))
    f_ans1 = open(os.path.expanduser(args.answer_list[0]))
    f_ans2 = open(os.path.expanduser(args.answer_list[1]))
    rule_dict = json.load(open(os.path.expanduser(args.rule), 'r'))

    review_file = open(f'{args.output}', 'w')

    js_list = []
    handles = []
    idx = 0
    # Files are JSONL; iterate the three in lockstep (one question per line).
    for ques_js, ans1_js, ans2_js in zip(f_q, f_ans1, f_ans2):
        # if idx == 1:
        #     break

        ques = json.loads(ques_js)
        ans1 = json.loads(ans1_js)
        ans2 = json.loads(ans2_js)

        # Pick the grading rule for this question's category (or the default).
        category = json.loads(ques_js)['category']
        if category in rule_dict:
            rule = rule_dict[category]
        else:
            rule = rule_dict['default']
        prompt = rule['prompt']
        role = rule['role']
        content = (f'[Question]\n{ques["text"]}\n\n'
                   f'[{role} 1]\n{ans1["text"]}\n\n[End of {role} 1]\n\n'
                   f'[{role} 2]\n{ans2["text"]}\n\n[End of {role} 2]\n\n'
                   f'[System]\n{prompt}\n\n')
        js_list.append({
            'id': idx+1,
            'question_id': ques['question_id'],
            'answer1_id': ans1['answer_id'],
            'answer2_id': ans2['answer_id'],
            'category': category})
        idx += 1
        handles.append(get_eval.remote(content, args.max_tokens))
        # To avoid the rate limit set by OpenAI
        time.sleep(NUM_SECONDS_TO_SLEEP)

    # Block until all remote reviews finish, then persist them in order.
    reviews = ray.get(handles)
    for idx, review in enumerate(reviews):
        scores = parse_score(review)
        js_list[idx]['content'] = review
        js_list[idx]['tuple'] = scores
        review_file.write(json.dumps(js_list[idx]) + '\n')
    review_file.close()
llava-phi/llava_phi/eval/eval_gpt_review_bench.py ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import json
3
+ import os
4
+
5
+ import openai
6
+ import time
7
+
8
+ NUM_SECONDS_TO_SLEEP = 0.5
9
+
10
+
11
def get_eval(content: str, max_tokens: int):
    """Ask GPT-4 (0314 snapshot) to review *content*, retrying until success."""
    while True:
        try:
            response = openai.ChatCompletion.create(
                model='gpt-4-0314',
                messages=[{
                    'role': 'system',
                    'content': 'You are a helpful and precise assistant for checking the quality of the answer.'
                }, {
                    'role': 'user',
                    'content': content,
                }],
                temperature=0.2,  # TODO: figure out which temperature is best for evaluation
                max_tokens=max_tokens,
            )
            break
        except openai.error.RateLimitError:
            # Fix: previously retried immediately on rate limit (busy loop);
            # back off before the next attempt.
            time.sleep(NUM_SECONDS_TO_SLEEP)
        except Exception as e:
            print(e)
            time.sleep(NUM_SECONDS_TO_SLEEP)

    return response['choices'][0]['message']['content']
34
+
35
+
36
def parse_score(review):
    """Parse the "score1 score2" pair from the first line of a GPT review.

    Returns [score1, score2] as floats, or [-1, -1] when parsing fails.
    """
    try:
        score_pair = review.split('\n')[0]
        score_pair = score_pair.replace(',', ' ')
        # Fix: split() instead of split(' ') so "8, 9" (double space after the
        # replace) still parses as two tokens.
        sp = score_pair.split()
        if len(sp) == 2:
            return [float(sp[0]), float(sp[1])]
        else:
            print('error', review)
            return [-1, -1]
    except Exception as e:
        print(e)
        print('error', review)
        return [-1, -1]
50
+
51
+
52
# CLI driver for LLaVA-Bench review: sequential (no ray), resumable — already-
# reviewed lines in --output are skipped on re-run.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.')
    parser.add_argument('-q', '--question')
    parser.add_argument('-c', '--context')
    parser.add_argument('-a', '--answer-list', nargs='+', default=[])
    parser.add_argument('-r', '--rule')
    parser.add_argument('-o', '--output')
    parser.add_argument('--max-tokens', type=int, default=1024, help='maximum number of tokens produced in the output')
    args = parser.parse_args()

    f_q = open(os.path.expanduser(args.question))
    f_ans1 = open(os.path.expanduser(args.answer_list[0]))
    f_ans2 = open(os.path.expanduser(args.answer_list[1]))
    rule_dict = json.load(open(os.path.expanduser(args.rule), 'r'))

    # Resume support: load whatever reviews already exist in the output file.
    if os.path.isfile(os.path.expanduser(args.output)):
        cur_reviews = [json.loads(line) for line in open(os.path.expanduser(args.output))]
    else:
        cur_reviews = []

    review_file = open(f'{args.output}', 'a')

    # Map each image to its caption context record.
    context_list = [json.loads(line) for line in open(os.path.expanduser(args.context))]
    image_to_context = {context['image']: context for context in context_list}

    handles = []
    idx = 0
    for ques_js, ans1_js, ans2_js in zip(f_q, f_ans1, f_ans2):
        ques = json.loads(ques_js)
        ans1 = json.loads(ans1_js)
        ans2 = json.loads(ans2_js)

        inst = image_to_context[ques['image']]

        if isinstance(inst['caption'], list):
            cap_str = '\n'.join(inst['caption'])
        else:
            cap_str = inst['caption']

        category = 'llava_bench_' + json.loads(ques_js)['category']
        if category in rule_dict:
            rule = rule_dict[category]
        else:
            assert False, f"Visual QA category not found in rule file: {category}."
        prompt = rule['prompt']
        role = rule['role']
        content = (f'[Context]\n{cap_str}\n\n'
                   f'[Question]\n{ques["text"]}\n\n'
                   f'[{role} 1]\n{ans1["text"]}\n\n[End of {role} 1]\n\n'
                   f'[{role} 2]\n{ans2["text"]}\n\n[End of {role} 2]\n\n'
                   f'[System]\n{prompt}\n\n')
        cur_js = {
            'id': idx+1,
            'question_id': ques['question_id'],
            # NOTE(review): fallbacks differ — ans1 falls back to question_id,
            # ans2 indexes 'answer_id' directly; confirm this asymmetry is intended.
            'answer1_id': ans1.get('answer_id', ans1['question_id']),
            'answer2_id': ans2.get('answer_id', ans2['answer_id']),
            'category': category
        }
        if idx >= len(cur_reviews):
            review = get_eval(content, args.max_tokens)
            scores = parse_score(review)
            cur_js['content'] = review
            cur_js['tuple'] = scores
            review_file.write(json.dumps(cur_js) + '\n')
            review_file.flush()
        else:
            print(f'Skipping {idx} as we already have it.')
        idx += 1
        print(idx)
    review_file.close()
llava-phi/llava_phi/eval/eval_gpt_review_visual.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import json
3
+ import os
4
+
5
+ import openai
6
+ import time
7
+
8
+ NUM_SECONDS_TO_SLEEP = 0.5
9
+
10
+
11
def get_eval(content: str, max_tokens: int):
    """Ask GPT-4 (0314 snapshot) to review *content*, retrying until success."""
    while True:
        try:
            response = openai.ChatCompletion.create(
                model='gpt-4-0314',
                messages=[{
                    'role': 'system',
                    'content': 'You are a helpful and precise assistant for checking the quality of the answer.'
                }, {
                    'role': 'user',
                    'content': content,
                }],
                temperature=0.2,  # TODO: figure out which temperature is best for evaluation
                max_tokens=max_tokens,
            )
            break
        except openai.error.RateLimitError:
            # Fix: previously retried immediately on rate limit (busy loop);
            # back off before the next attempt.
            time.sleep(NUM_SECONDS_TO_SLEEP)
        except Exception as e:
            print(e)
            time.sleep(NUM_SECONDS_TO_SLEEP)

    return response['choices'][0]['message']['content']
34
+
35
+
36
def parse_score(review):
    """Parse the "score1 score2" pair from the first line of a GPT review.

    Returns [score1, score2] as floats, or [-1, -1] when parsing fails.
    """
    try:
        score_pair = review.split('\n')[0]
        score_pair = score_pair.replace(',', ' ')
        # Fix: split() instead of split(' ') so "8, 9" (double space after the
        # replace) still parses as two tokens.
        sp = score_pair.split()
        if len(sp) == 2:
            return [float(sp[0]), float(sp[1])]
        else:
            print('error', review)
            return [-1, -1]
    except Exception as e:
        print(e)
        print('error', review)
        return [-1, -1]
50
+
51
+
52
# CLI driver for visual QA review with caption + bounding-box context;
# sequential and resumable (existing --output lines are skipped).
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.')
    parser.add_argument('-q', '--question')
    parser.add_argument('-c', '--context')
    parser.add_argument('-a', '--answer-list', nargs='+', default=[])
    parser.add_argument('-r', '--rule')
    parser.add_argument('-o', '--output')
    parser.add_argument('--max-tokens', type=int, default=1024, help='maximum number of tokens produced in the output')
    args = parser.parse_args()

    f_q = open(os.path.expanduser(args.question))
    f_ans1 = open(os.path.expanduser(args.answer_list[0]))
    f_ans2 = open(os.path.expanduser(args.answer_list[1]))
    rule_dict = json.load(open(os.path.expanduser(args.rule), 'r'))

    # Resume support: load whatever reviews already exist in the output file.
    if os.path.isfile(os.path.expanduser(args.output)):
        cur_reviews = [json.loads(line) for line in open(os.path.expanduser(args.output))]
    else:
        cur_reviews = []

    review_file = open(f'{args.output}', 'a')

    context_list = [json.loads(line) for line in open(os.path.expanduser(args.context))]
    image_to_context = {context['image']: context for context in context_list}

    handles = []
    idx = 0
    for ques_js, ans1_js, ans2_js in zip(f_q, f_ans1, f_ans2):
        ques = json.loads(ques_js)
        ans1 = json.loads(ans1_js)
        ans2 = json.loads(ans2_js)

        # Build the textual scene context: captions plus object boxes.
        inst = image_to_context[ques['image']]
        cap_str = '\n'.join(inst['captions'])
        box_str = '\n'.join([f'{instance["category"]}: {instance["bbox"]}' for instance in inst['instances']])

        category = json.loads(ques_js)['category']
        if category in rule_dict:
            rule = rule_dict[category]
        else:
            assert False, f"Visual QA category not found in rule file: {category}."
        prompt = rule['prompt']
        role = rule['role']
        content = (f'[Context]\n{cap_str}\n\n{box_str}\n\n'
                   f'[Question]\n{ques["text"]}\n\n'
                   f'[{role} 1]\n{ans1["text"]}\n\n[End of {role} 1]\n\n'
                   f'[{role} 2]\n{ans2["text"]}\n\n[End of {role} 2]\n\n'
                   f'[System]\n{prompt}\n\n')
        cur_js = {
            'id': idx+1,
            'question_id': ques['question_id'],
            # NOTE(review): fallbacks differ — ans1 falls back to question_id,
            # ans2 indexes 'answer_id' directly; confirm this asymmetry is intended.
            'answer1_id': ans1.get('answer_id', ans1['question_id']),
            'answer2_id': ans2.get('answer_id', ans2['answer_id']),
            'category': category
        }
        if idx >= len(cur_reviews):
            review = get_eval(content, args.max_tokens)
            scores = parse_score(review)
            cur_js['content'] = review
            cur_js['tuple'] = scores
            review_file.write(json.dumps(cur_js) + '\n')
            review_file.flush()
        else:
            print(f'Skipping {idx} as we already have it.')
        idx += 1
        print(idx)
    review_file.close()
llava-phi/llava_phi/eval/eval_pope.py ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ import argparse
4
+
5
def eval_pope(answers, label_file):
    """Score yes/no POPE answers against a JSONL label file and print metrics.

    Mutates each answer in place, normalizing 'text' to 'yes'/'no', then prints
    the confusion matrix, accuracy/precision/recall/F1, and the yes ratio.
    """
    label_list = [json.loads(q)['label'] for q in open(label_file, 'r')]

    for answer in answers:
        text = answer['text']

        # Only keep the first sentence
        if text.find('.') != -1:
            text = text.split('.')[0]

        text = text.replace(',', '')
        words = text.split(' ')
        # Any negation word in the first sentence counts as a "no" answer.
        if 'No' in words or 'not' in words or 'no' in words:
            answer['text'] = 'no'
        else:
            answer['text'] = 'yes'

    # Encode labels: 'no' -> 0, anything else -> 1.
    for i in range(len(label_list)):
        if label_list[i] == 'no':
            label_list[i] = 0
        else:
            label_list[i] = 1

    pred_list = []
    for answer in answers:
        if answer['text'] == 'no':
            pred_list.append(0)
        else:
            pred_list.append(1)

    pos = 1
    neg = 0
    yes_ratio = pred_list.count(1) / len(pred_list)

    TP, TN, FP, FN = 0, 0, 0, 0
    for pred, label in zip(pred_list, label_list):
        if pred == pos and label == pos:
            TP += 1
        elif pred == pos and label == neg:
            FP += 1
        elif pred == neg and label == neg:
            TN += 1
        elif pred == neg and label == pos:
            FN += 1

    print('TP\tFP\tTN\tFN\t')
    print('{}\t{}\t{}\t{}'.format(TP, FP, TN, FN))

    # NOTE(review): raises ZeroDivisionError when the model predicts all-no
    # (TP + FP == 0) or all labels are negative — confirm inputs preclude this.
    precision = float(TP) / float(TP + FP)
    recall = float(TP) / float(TP + FN)
    f1 = 2*precision*recall / (precision + recall)
    acc = (TP + TN) / (TP + TN + FP + FN)
    print('Accuracy: {}'.format(acc))
    print('Precision: {}'.format(precision))
    print('Recall: {}'.format(recall))
    print('F1 score: {}'.format(f1))
    print('Yes ratio: {}'.format(yes_ratio))
    print('%.3f, %.3f, %.3f, %.3f, %.3f' % (f1, acc, precision, recall, yes_ratio) )
63
+
64
# CLI driver: evaluate POPE results per annotation split found in --annotation-dir.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--annotation-dir", type=str)
    parser.add_argument("--question-file", type=str)
    parser.add_argument("--result-file", type=str)
    args = parser.parse_args()

    questions = [json.loads(line) for line in open(args.question_file)]
    questions = {question['question_id']: question for question in questions}
    answers = [json.loads(q) for q in open(args.result_file)]
    for file in os.listdir(args.annotation_dir):
        # Annotation files must be named coco_pope_<category>.json.
        assert file.startswith('coco_pope_')
        assert file.endswith('.json')
        category = file[10:-5]
        cur_answers = [x for x in answers if questions[x['question_id']]['category'] == category]
        print('Category: {}, # samples: {}'.format(category, len(cur_answers)))
        eval_pope(cur_answers, os.path.join(args.annotation_dir, file))
        print("====================================")
llava-phi/llava_phi/eval/eval_science_qa.py ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import json
3
+ import os
4
+ import re
5
+ import random
6
+
7
+
8
def get_args():
    """Parse CLI arguments for ScienceQA evaluation."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--base-dir', type=str)
    parser.add_argument('--result-file', type=str)
    parser.add_argument('--output-file', type=str)
    parser.add_argument('--output-result', type=str)
    parser.add_argument('--split', type=str, default='test')
    # NOTE(review): type=list splits a CLI-supplied value into characters
    # (argparse quirk); only the default ["A".."E"] behaves as intended.
    parser.add_argument('--options', type=list, default=["A", "B", "C", "D", "E"])
    return parser.parse_args()
17
+
18
+
19
def convert_caps(results):
    """Convert QA result records into COCO-caption-style dicts."""
    return [
        {"image_id": int(record["question_id"]), "caption": record["text"]}
        for record in results
    ]
26
+
27
+
28
def get_pred_idx(prediction, choices, options):
    """
    Get the index (e.g. 2) from the prediction (e.g. 'C').

    Returns -1 when *prediction* is not a valid option letter for the number
    of available choices.
    """
    # Fix: removed the unreachable `return random.choice(...)` that followed
    # the unconditional `return -1` (dead code); behavior is unchanged.
    if prediction in options[:len(choices)]:
        return options.index(prediction)
    return -1
37
+
38
+
39
# CLI driver: score ScienceQA predictions against ground truth, reporting
# overall and image-question (multimodal) accuracy, and dump two JSON reports.
if __name__ == "__main__":
    args = get_args()

    base_dir = args.base_dir
    split_indices = json.load(open(os.path.join(base_dir, "pid_splits.json")))[args.split]
    problems = json.load(open(os.path.join(base_dir, "problems.json")))
    predictions = [json.loads(line) for line in open(args.result_file)]
    predictions = {pred['question_id']: pred for pred in predictions}
    split_problems = {idx: problems[idx] for idx in split_indices}

    results = {'correct': [], 'incorrect': []}
    sqa_results = {}
    sqa_results['acc'] = None
    sqa_results['correct'] = None
    sqa_results['count'] = None
    sqa_results['results'] = {}
    sqa_results['outputs'] = {}

    for prob_id, prob in split_problems.items():
        # Missing predictions are scored as failures rather than skipped.
        if prob_id not in predictions:
            pred = {'text': 'FAILED', 'prompt': 'Unknown'}
            pred_text = 'FAILED'
        else:
            pred = predictions[prob_id]
            pred_text = pred['text']

        # Extract the answer letter: bare letter, "X. ..." prefix, or the
        # phrase "The answer is X." anywhere in the output.
        if pred_text in args.options:
            answer = pred_text
        elif len(pred_text) >= 3 and pred_text[0] in args.options and pred_text[1:3] == ". ":
            answer = pred_text[0]
        else:
            pattern = re.compile(r'The answer is ([A-Z]).')
            res = pattern.findall(pred_text)
            if len(res) == 1:
                answer = res[0]  # 'A', 'B', ...
            else:
                answer = "FAILED"

        pred_idx = get_pred_idx(answer, prob['choices'], args.options)

        analysis = {
            'question_id': prob_id,
            'parsed_ans': answer,
            'ground_truth': args.options[prob['answer']],
            'question': pred['prompt'],
            'pred': pred_text,
            'is_multimodal': '<image>' in pred['prompt'],
        }

        sqa_results['results'][prob_id] = get_pred_idx(answer, prob['choices'], args.options)
        sqa_results['outputs'][prob_id] = pred_text

        if pred_idx == prob['answer']:
            results['correct'].append(analysis)
        else:
            results['incorrect'].append(analysis)

    correct = len(results['correct'])
    total = len(results['correct']) + len(results['incorrect'])

    ###### IMG ######
    multimodal_correct = len([x for x in results['correct'] if x['is_multimodal']])
    multimodal_incorrect = len([x for x in results['incorrect'] if x['is_multimodal']])
    multimodal_total = multimodal_correct + multimodal_incorrect
    ###### IMG ######

    print(f'Total: {total}, Correct: {correct}, Accuracy: {correct / total * 100:.2f}%, IMG-Accuracy: {multimodal_correct / multimodal_total * 100:.2f}%')

    sqa_results['acc'] = correct / total * 100
    sqa_results['correct'] = correct
    sqa_results['count'] = total

    with open(args.output_file, 'w') as f:
        json.dump(results, f, indent=2)
    with open(args.output_result, 'w') as f:
        json.dump(sqa_results, f, indent=2)
llava-phi/llava_phi/eval/eval_science_qa_gpt4.py ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import json
3
+ import os
4
+ import re
5
+ import random
6
+ from collections import defaultdict
7
+
8
+
9
def get_args():
    """Parse CLI arguments for the GPT-4 vs. ours ScienceQA comparison script."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--base-dir', type=str)
    parser.add_argument('--gpt4-result', type=str)
    parser.add_argument('--our-result', type=str)
    parser.add_argument('--split', type=str, default='test')
    # BUG FIX: `type=list` splits a user-supplied string into single
    # characters (e.g. "ABC" -> ['A', 'B', 'C'] only by accident).
    # `nargs='+'` accepts space-separated option letters and keeps the
    # same default, so existing invocations are unaffected.
    parser.add_argument('--options', type=str, nargs='+', default=["A", "B", "C", "D", "E"])
    return parser.parse_args()
17
+
18
+
19
def convert_caps(results):
    """Re-shape QA-style result records into COCO-caption-style dicts."""
    return [
        {"image_id": int(item["question_id"]), "caption": item["text"]}
        for item in results
    ]
26
+
27
+
28
def get_pred_idx(prediction, choices, options):
    """
    Get the index (e.g. 2) from the prediction (e.g. 'C').

    Falls back to a uniformly random valid index when the prediction is not
    one of the option letters available for this question.
    """
    valid_letters = options[:len(choices)]
    if prediction not in valid_letters:
        return random.choice(range(len(choices)))
    return options.index(prediction)
36
+
37
+
38
+ if __name__ == "__main__":
39
+ args = get_args()
40
+
41
+ base_dir = args.base_dir
42
+ split_indices = json.load(open(os.path.join(base_dir, "pid_splits.json")))[args.split]
43
+ problems = json.load(open(os.path.join(base_dir, "problems.json")))
44
+ our_predictions = [json.loads(line) for line in open(args.our_result)]
45
+ our_predictions = {pred['question_id']: pred for pred in our_predictions}
46
+ split_problems = {idx: problems[idx] for idx in split_indices}
47
+
48
+ gpt4_predictions = json.load(open(args.gpt4_result))['outputs']
49
+
50
+ results = defaultdict(lambda: 0)
51
+
52
+ for prob_id, prob in split_problems.items():
53
+ if prob_id not in our_predictions:
54
+ continue
55
+ if prob_id not in gpt4_predictions:
56
+ continue
57
+ our_pred = our_predictions[prob_id]['text']
58
+ gpt4_pred = gpt4_predictions[prob_id]
59
+
60
+ pattern = re.compile(r'The answer is ([A-Z]).')
61
+ our_res = pattern.findall(our_pred)
62
+ if len(our_res) == 1:
63
+ our_answer = our_res[0] # 'A', 'B', ...
64
+ else:
65
+ our_answer = "FAILED"
66
+ gpt4_res = pattern.findall(gpt4_pred)
67
+ if len(gpt4_res) == 1:
68
+ gpt4_answer = gpt4_res[0] # 'A', 'B', ...
69
+ else:
70
+ gpt4_answer = "FAILED"
71
+
72
+ our_pred_idx = get_pred_idx(our_answer, prob['choices'], args.options)
73
+ gpt4_pred_idx = get_pred_idx(gpt4_answer, prob['choices'], args.options)
74
+
75
+ if gpt4_answer == 'FAILED':
76
+ results['gpt4_failed'] += 1
77
+ # continue
78
+ gpt4_pred_idx = our_pred_idx
79
+ # if our_pred_idx != prob['answer']:
80
+ # print(our_predictions[prob_id]['prompt'])
81
+ # print('-----------------')
82
+ # print(f'LECTURE: {prob["lecture"]}')
83
+ # print(f'SOLUTION: {prob["solution"]}')
84
+ # print('=====================')
85
+ else:
86
+ # continue
87
+ pass
88
+ # gpt4_pred_idx = our_pred_idx
89
+
90
+ if gpt4_pred_idx == prob['answer']:
91
+ results['correct'] += 1
92
+ else:
93
+ results['incorrect'] += 1
94
+
95
+
96
+ if gpt4_pred_idx == prob['answer'] or our_pred_idx == prob['answer']:
97
+ results['correct_upperbound'] += 1
98
+
99
+ correct = results['correct']
100
+ total = results['correct'] + results['incorrect']
101
+ print(f'Total: {total}, Correct: {correct}, Accuracy: {correct / total * 100:.2f}%')
102
+ print(f'Total: {total}, Correct (upper): {results["correct_upperbound"]}, Accuracy: {results["correct_upperbound"] / total * 100:.2f}%')
103
+ print(f'Total: {total}, GPT-4 NO-ANS (RANDOM): {results["gpt4_failed"]}, Percentage: {results["gpt4_failed"] / total * 100:.2f}%')
104
+
llava-phi/llava_phi/eval/eval_science_qa_gpt4_requery.py ADDED
@@ -0,0 +1,149 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import json
3
+ import os
4
+ import re
5
+ import random
6
+ from collections import defaultdict
7
+
8
+
9
def get_args():
    """Parse CLI arguments for the requery ScienceQA comparison script."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--base-dir', type=str)
    parser.add_argument('--gpt4-result', type=str)
    parser.add_argument('--requery-result', type=str)
    parser.add_argument('--our-result', type=str)
    parser.add_argument('--output-result', type=str)
    parser.add_argument('--split', type=str, default='test')
    # BUG FIX: `type=list` turns a user-supplied string into a list of
    # single characters; `nargs='+'` parses space-separated letters
    # correctly while leaving the default (and thus all current callers)
    # unchanged.
    parser.add_argument('--options', type=str, nargs='+', default=["A", "B", "C", "D", "E"])
    return parser.parse_args()
19
+
20
+
21
def convert_caps(results):
    """Convert QA result records into COCO-caption-style dicts."""
    def as_caption(entry):
        return {"image_id": int(entry["question_id"]), "caption": entry["text"]}

    return [as_caption(entry) for entry in results]
28
+
29
+
30
def get_pred_idx(prediction, choices, options):
    """
    Get the index (e.g. 2) from the prediction (e.g. 'C').

    A prediction outside the letters valid for this question maps to a
    uniformly random valid index.
    """
    try:
        position = options.index(prediction)
    except ValueError:
        position = len(choices)  # out of range -> triggers fallback below
    if position < len(choices):
        return position
    return random.choice(range(len(choices)))
38
+
39
+
40
+ if __name__ == "__main__":
41
+ args = get_args()
42
+
43
+ base_dir = args.base_dir
44
+ split_indices = json.load(open(os.path.join(base_dir, "pid_splits.json")))[args.split]
45
+ problems = json.load(open(os.path.join(base_dir, "problems.json")))
46
+ our_predictions = [json.loads(line) for line in open(args.our_result)]
47
+ our_predictions = {pred['question_id']: pred for pred in our_predictions}
48
+ split_problems = {idx: problems[idx] for idx in split_indices}
49
+
50
+ requery_predictions = [json.loads(line) for line in open(args.requery_result)]
51
+ requery_predictions = {pred['question_id']: pred for pred in requery_predictions}
52
+
53
+ gpt4_predictions = json.load(open(args.gpt4_result))['outputs']
54
+
55
+ results = defaultdict(lambda: 0)
56
+
57
+ sqa_results = {}
58
+ sqa_results['acc'] = None
59
+ sqa_results['correct'] = None
60
+ sqa_results['count'] = None
61
+ sqa_results['results'] = {}
62
+ sqa_results['outputs'] = {}
63
+
64
+ for prob_id, prob in split_problems.items():
65
+ if prob_id not in our_predictions:
66
+ assert False
67
+ if prob_id not in gpt4_predictions:
68
+ assert False
69
+ our_pred = our_predictions[prob_id]['text']
70
+ gpt4_pred = gpt4_predictions[prob_id]
71
+ if prob_id not in requery_predictions:
72
+ results['missing_requery'] += 1
73
+ requery_pred = "MISSING"
74
+ else:
75
+ requery_pred = requery_predictions[prob_id]['text']
76
+
77
+ pattern = re.compile(r'The answer is ([A-Z]).')
78
+ our_res = pattern.findall(our_pred)
79
+ if len(our_res) == 1:
80
+ our_answer = our_res[0] # 'A', 'B', ...
81
+ else:
82
+ our_answer = "FAILED"
83
+
84
+ requery_res = pattern.findall(requery_pred)
85
+ if len(requery_res) == 1:
86
+ requery_answer = requery_res[0] # 'A', 'B', ...
87
+ else:
88
+ requery_answer = "FAILED"
89
+
90
+ gpt4_res = pattern.findall(gpt4_pred)
91
+ if len(gpt4_res) == 1:
92
+ gpt4_answer = gpt4_res[0] # 'A', 'B', ...
93
+ else:
94
+ gpt4_answer = "FAILED"
95
+
96
+ our_pred_idx = get_pred_idx(our_answer, prob['choices'], args.options)
97
+ gpt4_pred_idx = get_pred_idx(gpt4_answer, prob['choices'], args.options)
98
+ requery_pred_idx = get_pred_idx(requery_answer, prob['choices'], args.options)
99
+
100
+ results['total'] += 1
101
+
102
+ if gpt4_answer == 'FAILED':
103
+ results['gpt4_failed'] += 1
104
+ if gpt4_pred_idx == prob['answer']:
105
+ results['gpt4_correct'] += 1
106
+ if our_pred_idx == prob['answer']:
107
+ results['gpt4_ourvisual_correct'] += 1
108
+ elif gpt4_pred_idx == prob['answer']:
109
+ results['gpt4_correct'] += 1
110
+ results['gpt4_ourvisual_correct'] += 1
111
+
112
+ if our_pred_idx == prob['answer']:
113
+ results['our_correct'] += 1
114
+
115
+ if requery_answer == 'FAILED':
116
+ sqa_results['results'][prob_id] = our_pred_idx
117
+ if our_pred_idx == prob['answer']:
118
+ results['requery_correct'] += 1
119
+ else:
120
+ sqa_results['results'][prob_id] = requery_pred_idx
121
+ if requery_pred_idx == prob['answer']:
122
+ results['requery_correct'] += 1
123
+ else:
124
+ print(f"""
125
+ Question ({args.options[prob['answer']]}): {our_predictions[prob_id]['prompt']}
126
+ Our ({our_answer}): {our_pred}
127
+ GPT-4 ({gpt4_answer}): {gpt4_pred}
128
+ Requery ({requery_answer}): {requery_pred}
129
+ print("=====================================")
130
+ """)
131
+
132
+ if gpt4_pred_idx == prob['answer'] or our_pred_idx == prob['answer']:
133
+ results['correct_upperbound'] += 1
134
+
135
+ total = results['total']
136
+ print(f'Total: {total}, Our-Correct: {results["our_correct"]}, Accuracy: {results["our_correct"] / total * 100:.2f}%')
137
+ print(f'Total: {total}, GPT-4-Correct: {results["gpt4_correct"]}, Accuracy: {results["gpt4_correct"] / total * 100:.2f}%')
138
+ print(f'Total: {total}, GPT-4 NO-ANS (RANDOM): {results["gpt4_failed"]}, Percentage: {results["gpt4_failed"] / total * 100:.2f}%')
139
+ print(f'Total: {total}, GPT-4-OursVisual-Correct: {results["gpt4_ourvisual_correct"]}, Accuracy: {results["gpt4_ourvisual_correct"] / total * 100:.2f}%')
140
+ print(f'Total: {total}, Requery-Correct: {results["requery_correct"]}, Accuracy: {results["requery_correct"] / total * 100:.2f}%')
141
+ print(f'Total: {total}, Correct upper: {results["correct_upperbound"]}, Accuracy: {results["correct_upperbound"] / total * 100:.2f}%')
142
+
143
+ sqa_results['acc'] = results["requery_correct"] / total * 100
144
+ sqa_results['correct'] = results["requery_correct"]
145
+ sqa_results['count'] = total
146
+
147
+ with open(args.output_result, 'w') as f:
148
+ json.dump(sqa_results, f, indent=2)
149
+
llava-phi/llava_phi/eval/eval_textvqa.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import argparse
3
+ import json
4
+ import re
5
+
6
+ from llava_phi.eval.m4c_evaluator import TextVQAAccuracyEvaluator
7
+
8
+
9
def get_args():
    """CLI arguments: annotation file plus either one result file or a directory of result files."""
    parser = argparse.ArgumentParser()
    for flag in ('--annotation-file', '--result-file', '--result-dir'):
        parser.add_argument(flag, type=str)
    return parser.parse_args()
15
+
16
+
17
def prompt_processor(prompt):
    """Recover the lower-cased question text from a TextVQA evaluation prompt.

    Handles the three prompt templates used by the evaluation scripts and
    raises ValueError for anything else.
    """
    if prompt.startswith('OCR tokens: '):
        # "OCR tokens: ... Question: <q> Short answer:" template.
        pattern = r"Question: (.*?) Short answer:"
        match = re.search(pattern, prompt, re.DOTALL)
        question = match.group(1)
    elif 'Reference OCR token: ' in prompt and len(prompt.split('\n')) == 3:
        # Three-line template; the question is on line 1 or 2 depending on
        # whether the OCR reference comes first.
        if prompt.startswith('Reference OCR token:'):
            question = prompt.split('\n')[1]
        else:
            question = prompt.split('\n')[0]
    elif len(prompt.split('\n')) == 2:
        # "<question>\n<answer instruction>" template.
        question = prompt.split('\n')[0]
    else:
        # BUG FIX: was `assert False`, which is silently stripped under
        # `python -O`; raise explicitly so unknown templates always fail loudly.
        raise ValueError(f"Unrecognized prompt format: {prompt!r}")

    return question.lower()
33
+
34
+
35
def eval_single(annotation_file, result_file):
    """Score one JSONL result file against the TextVQA annotations and print accuracy."""
    experiment_name = os.path.splitext(os.path.basename(result_file))[0]
    print(experiment_name)
    # Annotations are keyed by (image_id, lower-cased question) so results can
    # be matched back even when question ids alone are ambiguous.
    annotations = json.load(open(annotation_file))['data']
    annotations = {(annotation['image_id'], annotation['question'].lower()): annotation for annotation in annotations}
    results = [json.loads(line) for line in open(result_file)]

    pred_list = []
    for result in results:
        # `prompt_processor` recovers the raw question text from the prompt template.
        annotation = annotations[(result['question_id'], prompt_processor(result['prompt']))]
        pred_list.append({
            "pred_answer": result['text'],
            "gt_answers": annotation['answers'],
        })

    evaluator = TextVQAAccuracyEvaluator()
    print('Samples: {}\nAccuracy: {:.2f}%\n'.format(len(pred_list), 100. * evaluator.eval_pred_list(pred_list)))
52
+
53
+
54
+ if __name__ == "__main__":
55
+ args = get_args()
56
+
57
+ if args.result_file is not None:
58
+ eval_single(args.annotation_file, args.result_file)
59
+
60
+ if args.result_dir is not None:
61
+ for result_file in sorted(os.listdir(args.result_dir)):
62
+ if not result_file.endswith('.jsonl'):
63
+ print(f'Skipping {result_file}')
64
+ continue
65
+ eval_single(args.annotation_file, os.path.join(args.result_dir, result_file))
llava-phi/llava_phi/eval/m4c_evaluator.py ADDED
@@ -0,0 +1,334 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ import re
3
+
4
+ from tqdm import tqdm
5
+
6
+
7
class EvalAIAnswerProcessor:
    """
    Processes an answer similar to Eval AI
    copied from
    https://github.com/facebookresearch/mmf/blob/c46b3b3391275b4181567db80943473a89ab98ab/pythia/tasks/processors.py#L897
    """

    # Common contraction misspellings -> canonical contraction form.
    CONTRACTIONS = {
        "aint": "ain't",
        "arent": "aren't",
        "cant": "can't",
        "couldve": "could've",
        "couldnt": "couldn't",
        "couldn'tve": "couldn't've",
        "couldnt've": "couldn't've",
        "didnt": "didn't",
        "doesnt": "doesn't",
        "dont": "don't",
        "hadnt": "hadn't",
        "hadnt've": "hadn't've",
        "hadn'tve": "hadn't've",
        "hasnt": "hasn't",
        "havent": "haven't",
        "hed": "he'd",
        "hed've": "he'd've",
        "he'dve": "he'd've",
        "hes": "he's",
        "howd": "how'd",
        "howll": "how'll",
        "hows": "how's",
        "Id've": "I'd've",
        "I'dve": "I'd've",
        "Im": "I'm",
        "Ive": "I've",
        "isnt": "isn't",
        "itd": "it'd",
        "itd've": "it'd've",
        "it'dve": "it'd've",
        "itll": "it'll",
        "let's": "let's",
        "maam": "ma'am",
        "mightnt": "mightn't",
        "mightnt've": "mightn't've",
        "mightn'tve": "mightn't've",
        "mightve": "might've",
        "mustnt": "mustn't",
        "mustve": "must've",
        "neednt": "needn't",
        "notve": "not've",
        "oclock": "o'clock",
        "oughtnt": "oughtn't",
        "ow's'at": "'ow's'at",
        "'ows'at": "'ow's'at",
        "'ow'sat": "'ow's'at",
        "shant": "shan't",
        "shed've": "she'd've",
        "she'dve": "she'd've",
        "she's": "she's",
        "shouldve": "should've",
        "shouldnt": "shouldn't",
        "shouldnt've": "shouldn't've",
        "shouldn'tve": "shouldn't've",
        "somebody'd": "somebodyd",
        "somebodyd've": "somebody'd've",
        "somebody'dve": "somebody'd've",
        "somebodyll": "somebody'll",
        "somebodys": "somebody's",
        "someoned": "someone'd",
        "someoned've": "someone'd've",
        "someone'dve": "someone'd've",
        "someonell": "someone'll",
        "someones": "someone's",
        "somethingd": "something'd",
        "somethingd've": "something'd've",
        "something'dve": "something'd've",
        "somethingll": "something'll",
        "thats": "that's",
        "thered": "there'd",
        "thered've": "there'd've",
        "there'dve": "there'd've",
        "therere": "there're",
        "theres": "there's",
        "theyd": "they'd",
        "theyd've": "they'd've",
        "they'dve": "they'd've",
        "theyll": "they'll",
        "theyre": "they're",
        "theyve": "they've",
        "twas": "'twas",
        "wasnt": "wasn't",
        "wed've": "we'd've",
        "we'dve": "we'd've",
        "weve": "we've",
        "werent": "weren't",
        "whatll": "what'll",
        "whatre": "what're",
        "whats": "what's",
        "whatve": "what've",
        "whens": "when's",
        "whered": "where'd",
        "wheres": "where's",
        "whereve": "where've",
        "whod": "who'd",
        "whod've": "who'd've",
        "who'dve": "who'd've",
        "wholl": "who'll",
        "whos": "who's",
        "whove": "who've",
        "whyll": "why'll",
        "whyre": "why're",
        "whys": "why's",
        "wont": "won't",
        "wouldve": "would've",
        "wouldnt": "wouldn't",
        "wouldnt've": "wouldn't've",
        "wouldn'tve": "wouldn't've",
        "yall": "y'all",
        "yall'll": "y'all'll",
        "y'allll": "y'all'll",
        "yall'd've": "y'all'd've",
        "y'alld've": "y'all'd've",
        "y'all'dve": "y'all'd've",
        "youd": "you'd",
        "youd've": "you'd've",
        "you'dve": "you'd've",
        "youll": "you'll",
        "youre": "you're",
        "youve": "you've",
    }

    # Spelled-out numbers normalized to digit strings.
    NUMBER_MAP = {
        "none": "0",
        "zero": "0",
        "one": "1",
        "two": "2",
        "three": "3",
        "four": "4",
        "five": "5",
        "six": "6",
        "seven": "7",
        "eight": "8",
        "nine": "9",
        "ten": "10",
    }
    ARTICLES = ["a", "an", "the"]
    # NOTE(review): `(?!<=\d)` looks like a typo for the negative lookbehind
    # `(?<!\d)`; as written it is a negative lookahead for the literal text
    # "<=" + digit, which never matches before a '.', so in practice ALL
    # periods not followed by a digit are stripped. Kept as-is to remain
    # numerically comparable with the upstream mmf/EvalAI implementation.
    PERIOD_STRIP = re.compile(r"(?!<=\d)(\.)(?!\d)")
    COMMA_STRIP = re.compile(r"(?<=\d)(\,)+(?=\d)")
    PUNCTUATIONS = [
        ";",
        r"/",
        "[",
        "]",
        '"',
        "{",
        "}",
        "(",
        ")",
        "=",
        "+",
        "\\",
        "_",
        "-",
        ">",
        "<",
        "@",
        "`",
        ",",
        "?",
        "!",
    ]

    def __init__(self, *args, **kwargs):
        # Stateless; arbitrary arguments are accepted and ignored.
        pass

    def word_tokenize(self, word):
        """Lower-case, drop commas/question marks, split possessive `'s`."""
        word = word.lower()
        word = word.replace(",", "").replace("?", "").replace("'s", " 's")
        return word.strip()

    def process_punctuation(self, in_text):
        """Strip or space-replace punctuation per the EvalAI VQA rules."""
        out_text = in_text
        for p in self.PUNCTUATIONS:
            # Punctuation adjacent to a space (or any text with a
            # digit,digit pattern) is deleted; otherwise replaced by a space.
            if (p + " " in in_text or " " + p in in_text) or (
                re.search(self.COMMA_STRIP, in_text) is not None
            ):
                out_text = out_text.replace(p, "")
            else:
                out_text = out_text.replace(p, " ")
        # NOTE(review): the third positional argument of Pattern.sub() is
        # `count`, so `re.UNICODE` (== 32) is passed as a replacement COUNT,
        # not as a flag. Kept as-is for parity with the upstream metric.
        out_text = self.PERIOD_STRIP.sub("", out_text, re.UNICODE)
        return out_text

    def process_digit_article(self, in_text):
        """Map number words to digits, drop articles, expand known contractions."""
        out_text = []
        temp_text = in_text.lower().split()
        for word in temp_text:
            # NOTE(review): setdefault INSERTS every unseen word into the
            # shared class-level NUMBER_MAP (unbounded growth across calls);
            # `.get(word, word)` would return the same value without mutating.
            word = self.NUMBER_MAP.setdefault(word, word)
            if word not in self.ARTICLES:
                out_text.append(word)
            else:
                pass
        for word_id, word in enumerate(out_text):
            if word in self.CONTRACTIONS:
                out_text[word_id] = self.CONTRACTIONS[word]
        out_text = " ".join(out_text)
        return out_text

    def __call__(self, item):
        """Apply the full normalization pipeline to one answer string."""
        item = self.word_tokenize(item)
        item = item.replace("\n", " ").replace("\t", " ").strip()
        item = self.process_punctuation(item)
        item = self.process_digit_article(item)
        return item
+ return item
219
+
220
+
221
class TextVQAAccuracyEvaluator:
    """VQA-style soft accuracy over EvalAI-normalized answers (TextVQA)."""

    def __init__(self):
        # Shared answer-normalization pipeline.
        self.answer_processor = EvalAIAnswerProcessor()

    def _compute_answer_scores(self, raw_answers):
        """
        compute the accuracy (soft score) of human answers
        """
        answers = [self.answer_processor(a) for a in raw_answers]
        # The VQA protocol supplies exactly 10 human answers per question.
        assert len(answers) == 10
        gt_answers = list(enumerate(answers))
        unique_answers = set(answers)
        unique_answer_scores = {}

        # Leave-one-out soft score: for each of the 10 folds, an answer scores
        # min(#other annotators agreeing / 3, 1); the fold scores are averaged.
        for unique_answer in unique_answers:
            accs = []
            for gt_answer in gt_answers:
                other_answers = [item for item in gt_answers if item != gt_answer]
                matching_answers = [
                    item for item in other_answers if item[1] == unique_answer
                ]
                acc = min(1, float(len(matching_answers)) / 3)
                accs.append(acc)
            unique_answer_scores[unique_answer] = sum(accs) / len(accs)

        return unique_answer_scores

    def eval_pred_list(self, pred_list):
        """Return the mean soft score of `pred_answer` vs. `gt_answers` entries."""
        pred_scores = []
        for entry in tqdm(pred_list):
            pred_answer = self.answer_processor(entry["pred_answer"])
            unique_answer_scores = self._compute_answer_scores(entry["gt_answers"])
            # Unseen answers score 0.
            score = unique_answer_scores.get(pred_answer, 0.0)
            pred_scores.append(score)

        # NOTE(review): raises ZeroDivisionError on an empty pred_list.
        accuracy = sum(pred_scores) / len(pred_scores)
        return accuracy
+ return accuracy
258
+
259
+
260
class STVQAAccuracyEvaluator:
    """Exact-match accuracy over EvalAI-normalized answers (ST-VQA style)."""

    def __init__(self):
        self.answer_processor = EvalAIAnswerProcessor()

    def eval_pred_list(self, pred_list):
        """Return the fraction of entries whose normalized prediction matches
        any normalized ground-truth answer."""
        normalize = self.answer_processor
        scores = []
        for entry in pred_list:
            prediction = normalize(entry["pred_answer"])
            references = [normalize(gt) for gt in entry["gt_answers"]]
            scores.append(1.0 if prediction in references else 0.0)

        return sum(scores) / len(scores)
+ return accuracy
274
+
275
+
276
class STVQAANLSEvaluator:
    """Average Normalized Levenshtein Similarity (ANLS), as used for ST-VQA."""

    def __init__(self):
        import editdistance  # install with `pip install editdistance`

        self.get_edit_distance = editdistance.eval

    def get_anls(self, s1, s2):
        """Similarity in [0, 1] between two strings; values below the 0.5
        ANLS threshold are zeroed."""
        s1 = s1.lower().strip()
        s2 = s2.lower().strip()
        # NOTE(review): raises ZeroDivisionError when both strings are empty
        # after stripping — confirm upstream inputs are non-empty.
        iou = 1 - self.get_edit_distance(s1, s2) / max(len(s1), len(s2))
        anls = iou if iou >= 0.5 else 0.0
        return anls

    def eval_pred_list(self, pred_list):
        """Return the mean of the best ANLS against any ground truth per entry."""
        pred_scores = []
        for entry in pred_list:
            anls = max(
                self.get_anls(entry["pred_answer"], gt) for gt in entry["gt_answers"]
            )
            pred_scores.append(anls)

        accuracy = sum(pred_scores) / len(pred_scores)
        return accuracy
+ return accuracy
299
+
300
+
301
class TextCapsBleu4Evaluator:
    """BLEU-4 scorer for TextCaps using the COCO caption toolkit."""

    def __init__(self):
        # The following script requires Java 1.8.0 and pycocotools installed.
        # The pycocoevalcap can be installed with pip as
        # pip install git+https://github.com/ronghanghu/coco-caption.git@python23
        # Original pycocoevalcap code is at https://github.com/tylin/coco-caption
        # but has no python3 support yet.
        try:
            from pycocoevalcap.bleu.bleu import Bleu
            from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer
        except ModuleNotFoundError:
            print(
                "Please install pycocoevalcap module using "
                "pip install git+https://github.com/ronghanghu/coco-caption.git@python23"  # noqa
            )
            raise

        self.tokenizer = PTBTokenizer()
        self.scorer = Bleu(4)

    def eval_pred_list(self, pred_list):
        """Return corpus-level BLEU-4 of `pred_answer` against `gt_answers`."""
        # Create reference and hypotheses captions.
        gts = {}
        res = {}
        for idx, entry in enumerate(pred_list):
            gts[idx] = [{"caption": a} for a in entry["gt_answers"]]
            res[idx] = [{"caption": entry["pred_answer"]}]

        gts = self.tokenizer.tokenize(gts)
        res = self.tokenizer.tokenize(res)
        score, _ = self.scorer.compute_score(gts, res)

        bleu4 = score[3]  # score is (Bleu-1, Bleu-2, Bleu-3, Bleu-4)
        return bleu4
+ return bleu4
llava-phi/llava_phi/eval/model_qa.py ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ from transformers import AutoTokenizer, AutoModelForCausalLM, StoppingCriteria
3
+ import torch
4
+ import os
5
+ import json
6
+ from tqdm import tqdm
7
+ import shortuuid
8
+
9
+ from llava_phi.conversation import default_conversation
10
+ from llava_phi.utils import disable_torch_init
11
+
12
+
13
+ # new stopping implementation
14
class KeywordsStoppingCriteria(StoppingCriteria):
    """Stop generation once any keyword string appears in the decoded output."""

    def __init__(self, keywords, tokenizer, input_ids):
        self.keywords = keywords
        self.tokenizer = tokenizer
        # Prompt length; lazily captured on the first __call__.
        self.start_len = None
        self.input_ids = input_ids

    def __call__(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        if self.start_len is None:
            # First invocation: only record the prompt length.
            # NOTE(review): no keyword check happens on this call, so a
            # keyword emitted by the very first generated token is detected
            # one step late.
            self.start_len = self.input_ids.shape[1]
        else:
            # Only batch element 0 is inspected.
            outputs = self.tokenizer.batch_decode(output_ids[:, self.start_len:], skip_special_tokens=True)[0]
            for keyword in self.keywords:
                if keyword in outputs:
                    return True
        return False
30
+
31
+
32
@torch.inference_mode()
def eval_model(model_name, questions_file, answers_file):
    """Answer every question in *questions_file* (JSONL) with a plain causal
    LM and write one JSON object per line to *answers_file*."""
    # Model
    disable_torch_init()
    model_name = os.path.expanduser(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
    model = AutoModelForCausalLM.from_pretrained(model_name,
        torch_dtype=torch.float16).cuda()


    ques_file = open(os.path.expanduser(questions_file), "r")
    ans_file = open(os.path.expanduser(answers_file), "w")
    for i, line in enumerate(tqdm(ques_file)):
        idx = json.loads(line)["question_id"]
        qs = json.loads(line)["text"]
        cat = json.loads(line)["category"]
        # Wrap the question in the default conversation template.
        conv = default_conversation.copy()
        conv.append_message(conv.roles[0], qs)
        prompt = conv.get_prompt()
        inputs = tokenizer([prompt])
        input_ids = torch.as_tensor(inputs.input_ids).cuda()
        # stopping_criteria = KeywordsStoppingCriteria([conv.sep], tokenizer, input_ids)
        output_ids = model.generate(
            input_ids,
            do_sample=True,
            use_cache=True,
            temperature=0.7,
            max_new_tokens=1024,
            eos_token_id=tokenizer.eos_token_id,  # End of sequence token
            pad_token_id=tokenizer.eos_token_id,  # Pad token
            # stopping_criteria=[stopping_criteria]
        )
        outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]
        # Find the first separator after the echoed prompt; append one if the
        # model never emitted it so the slice below always succeeds.
        try:
            index = outputs.index(conv.sep, len(prompt))
        except ValueError:
            outputs += conv.sep
            index = outputs.index(conv.sep, len(prompt))

        # Keep only the assistant reply (skip prompt, role tag, and ": ").
        outputs = outputs[len(prompt) + len(conv.roles[1]) + 2:index].strip()
        ans_id = shortuuid.uuid()
        ans_file.write(json.dumps({"question_id": idx,
                                   "text": outputs,
                                   "answer_id": ans_id,
                                   "model_id": model_name,
                                   "metadata": {}}) + "\n")
        ans_file.flush()
    ans_file.close()
80
+
81
+ if __name__ == "__main__":
82
+ parser = argparse.ArgumentParser()
83
+ parser.add_argument("--model-name", type=str, default="facebook/opt-350m")
84
+ parser.add_argument("--question-file", type=str, default="tables/question.jsonl")
85
+ parser.add_argument("--answers-file", type=str, default="answer.jsonl")
86
+ args = parser.parse_args()
87
+
88
+ eval_model(args.model_name, args.question_file, args.answers_file)
llava-phi/llava_phi/eval/model_vqa.py ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import torch
3
+ import os
4
+ import json
5
+ from tqdm import tqdm
6
+ import shortuuid
7
+
8
+ from llava_phi.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
9
+ from llava_phi.conversation import conv_templates, SeparatorStyle
10
+ from llava_phi.model.builder import load_pretrained_model
11
+ from llava_phi.utils import disable_torch_init
12
+ from llava_phi.mm_utils import tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria
13
+
14
+ from PIL import Image
15
+ import math
16
+
17
+
18
def split_list(lst, n):
    """Split *lst* into n (roughly) equal-sized consecutive chunks."""
    # Ceiling division: the final chunk absorbs any remainder.
    per_chunk = math.ceil(len(lst) / n)
    return [lst[start:start + per_chunk] for start in range(0, len(lst), per_chunk)]
22
+
23
+
24
def get_chunk(lst, n, k):
    """Return the k-th (0-based) of n roughly equal-sized chunks of *lst*."""
    return split_list(lst, n)[k]
27
+
28
+
29
def eval_model(args):
    """Run LLaVA-Phi on a JSONL file of image questions and write answers as JSONL."""
    # Model
    disable_torch_init()
    model_path = os.path.expanduser(args.model_path)
    model_name = get_model_name_from_path(model_path)
    tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, args.model_base, model_name)

    questions = [json.loads(q) for q in open(os.path.expanduser(args.question_file), "r")]
    # Keep only this worker's shard when sharding across --num-chunks workers.
    questions = get_chunk(questions, args.num_chunks, args.chunk_idx)
    answers_file = os.path.expanduser(args.answers_file)
    os.makedirs(os.path.dirname(answers_file), exist_ok=True)
    ans_file = open(answers_file, "w")
    for line in tqdm(questions):
        idx = line["question_id"]
        image_file = line["image"]
        qs = line["text"]
        cur_prompt = qs
        # Prepend the image placeholder token(s) expected by the model config.
        if model.config.mm_use_im_start_end:
            qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + qs
        else:
            qs = DEFAULT_IMAGE_TOKEN + '\n' + qs

        conv = conv_templates[args.conv_mode].copy()
        conv.append_message(conv.roles[0], qs)
        conv.append_message(conv.roles[1], None)
        prompt = conv.get_prompt()
        input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda()

        image = Image.open(os.path.join(args.image_folder, image_file))
        image_tensor = image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0]

        # Stop generation at the conversation separator (sep2 for TWO-style templates).
        stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
        keywords = [stop_str]
        stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids)

        with torch.inference_mode():
            output_ids = model.generate(
                input_ids,
                images=image_tensor.unsqueeze(0).cuda(),
                do_sample=True if args.temperature > 0 else False,
                temperature=args.temperature,
                top_p=args.top_p,
                num_beams=args.num_beams,
                # no_repeat_ngram_size=3,
                eos_token_id=tokenizer.eos_token_id,  # End of sequence token
                pad_token_id=tokenizer.eos_token_id,  # Pad token
                max_new_tokens=1024,
                use_cache=True
            )

        # Strip the echoed prompt tokens, then any trailing stop string.
        input_token_len = input_ids.shape[1]
        n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item()
        if n_diff_input_output > 0:
            print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids')
        outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0]
        outputs = outputs.strip()
        if outputs.endswith(stop_str):
            outputs = outputs[:-len(stop_str)]
        outputs = outputs.strip()

        ans_id = shortuuid.uuid()
        ans_file.write(json.dumps({"question_id": idx,
                                   "image_id": image_file,
                                   "prompt": cur_prompt,
                                   "text": outputs,
                                   "answer_id": ans_id,
                                   "model_id": model_name,
                                   "metadata": {}}) + "\n")
        ans_file.flush()
    ans_file.close()
99
+
100
+ if __name__ == "__main__":
101
+ parser = argparse.ArgumentParser()
102
+ parser.add_argument("--model-path", type=str, default="facebook/opt-350m")
103
+ parser.add_argument("--model-base", type=str, default=None)
104
+ parser.add_argument("--image-folder", type=str, default="")
105
+ parser.add_argument("--question-file", type=str, default="tables/question.jsonl")
106
+ parser.add_argument("--answers-file", type=str, default="answer.jsonl")
107
+ parser.add_argument("--conv-mode", type=str, default="llava_v1")
108
+ parser.add_argument("--num-chunks", type=int, default=1)
109
+ parser.add_argument("--chunk-idx", type=int, default=0)
110
+ parser.add_argument("--temperature", type=float, default=0.2)
111
+ parser.add_argument("--top_p", type=float, default=None)
112
+ parser.add_argument("--num_beams", type=int, default=1)
113
+ args = parser.parse_args()
114
+
115
+ eval_model(args)
llava-phi/llava_phi/eval/model_vqa_loader.py ADDED
@@ -0,0 +1,144 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import torch
3
+ import os
4
+ import json
5
+ from tqdm import tqdm
6
+ import shortuuid
7
+
8
+ from llava_phi.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
9
+ from llava_phi.conversation import conv_templates, SeparatorStyle
10
+ from llava_phi.model.builder import load_pretrained_model
11
+ from llava_phi.utils import disable_torch_init
12
+ from llava_phi.mm_utils import tokenizer_image_token, process_images, get_model_name_from_path
13
+ from torch.utils.data import Dataset, DataLoader
14
+
15
+ from PIL import Image
16
+ import math
17
+
18
+
19
def split_list(lst, n):
    """Split *lst* into ``n`` (roughly) equal-sized contiguous chunks.

    Chunk size is ``ceil(len(lst) / n)`` (ceiling division, not integer
    division as the old comment claimed), so earlier chunks are full and the
    last chunk holds the remainder. The ``max(1, ...)`` guard prevents a
    ``range()`` step of 0 (a ValueError) when *lst* is empty; an empty input
    yields an empty list of chunks.
    """
    chunk_size = max(1, math.ceil(len(lst) / n))  # ceiling division, never 0
    return [lst[i:i + chunk_size] for i in range(0, len(lst), chunk_size)]


def get_chunk(lst, n, k):
    """Return the ``k``-th (0-indexed) of ``n`` chunks of *lst*."""
    chunks = split_list(lst, n)
    return chunks[k]
28
+
29
+
30
# Custom dataset class
class CustomDataset(Dataset):
    """Map-style dataset producing one ``(input_ids, image_tensor)`` pair per
    benchmark question.

    Building prompts and preprocessing images here lets DataLoader worker
    processes overlap CPU-side tokenization/image decoding with GPU
    generation in the evaluation loop.
    """

    def __init__(self, questions, image_folder, tokenizer, image_processor, model_config):
        self.questions = questions          # list of dicts with "image" and "text" keys
        self.image_folder = image_folder    # root dir that "image" paths are relative to
        self.tokenizer = tokenizer
        self.image_processor = image_processor
        self.model_config = model_config

    def __getitem__(self, index):
        line = self.questions[index]
        image_file = line["image"]
        qs = line["text"]
        # Prepend the image placeholder token, optionally wrapped in the
        # im_start/im_end markers depending on how the model was trained.
        if self.model_config.mm_use_im_start_end:
            qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + qs
        else:
            qs = DEFAULT_IMAGE_TOKEN + '\n' + qs

        # NOTE(review): reads the module-level `args` global for the
        # conversation template, so this class only works when the file is
        # run as a script after argparse has populated `args`.
        conv = conv_templates[args.conv_mode].copy()
        conv.append_message(conv.roles[0], qs)
        conv.append_message(conv.roles[1], None)
        prompt = conv.get_prompt()
        image = Image.open(os.path.join(self.image_folder, image_file)).convert('RGB')
        image_tensor = process_images([image], self.image_processor, self.model_config)[0]

        # Tokenize the prompt, replacing the image placeholder with
        # IMAGE_TOKEN_INDEX so the model can splice in visual features.
        input_ids = tokenizer_image_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt')

        return input_ids, image_tensor

    def __len__(self):
        return len(self.questions)
61
+
62
+
63
# DataLoader
def create_data_loader(questions, image_folder, tokenizer, image_processor, model_config, batch_size=1, num_workers=4):
    """Wrap the question list in a sequential, worker-backed DataLoader.

    Generation downstream assumes one prompt at a time, hence the
    batch-size-1 assertion; ordering must match `questions`, so no shuffle.
    """
    assert batch_size == 1, "batch_size must be 1"
    eval_dataset = CustomDataset(questions, image_folder, tokenizer, image_processor, model_config)
    return DataLoader(eval_dataset, batch_size=batch_size, num_workers=num_workers, shuffle=False)
69
+
70
+
71
def eval_model(args):
    """Answer every question in a JSONL file with a LLaVA-Phi checkpoint.

    Prompts/images are prepared in DataLoader workers; each generated answer
    is written as one JSON line to ``args.answers_file``.
    """
    # Model
    disable_torch_init()
    model_path = os.path.expanduser(args.model_path)
    model_name = get_model_name_from_path(model_path)
    tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, args.model_base, model_name)

    # One JSON object per line; shard per --num-chunks/--chunk-idx.
    questions = [json.loads(q) for q in open(os.path.expanduser(args.question_file), "r")]
    questions = get_chunk(questions, args.num_chunks, args.chunk_idx)
    answers_file = os.path.expanduser(args.answers_file)
    os.makedirs(os.path.dirname(answers_file), exist_ok=True)
    ans_file = open(answers_file, "w")

    data_loader = create_data_loader(questions, args.image_folder, tokenizer, image_processor, model.config)

    # data_loader preserves order, so it can be zipped with `questions` to
    # recover each item's id and raw prompt text.
    for (input_ids, image_tensor), line in tqdm(zip(data_loader, questions), total=len(questions)):
        idx = line["question_id"]
        cur_prompt = line["text"]

        # Separator used to trim a trailing stop token from the decoded text.
        stop_str = conv_templates[args.conv_mode].sep if conv_templates[args.conv_mode].sep_style != SeparatorStyle.TWO else conv_templates[args.conv_mode].sep2
        input_ids = input_ids.to(device='cuda', non_blocking=True)

        with torch.inference_mode():
            output_ids = model.generate(
                input_ids,
                images=image_tensor.to(device='cuda', non_blocking=True),
                do_sample=True if args.temperature > 0 else False,
                temperature=args.temperature,
                top_p=args.top_p,
                # no_repeat_ngram_size=3,
                num_beams=args.num_beams,
                max_new_tokens=128,
                eos_token_id=tokenizer.eos_token_id,  # End of sequence token
                pad_token_id=tokenizer.eos_token_id,  # Pad token
                use_cache=True
            )

        # generate() echoes the prompt; sanity-check the echo then decode
        # only the newly generated tail.
        input_token_len = input_ids.shape[1]
        n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item()

        if n_diff_input_output > 0:
            print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids')
        outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0]
        outputs = outputs.strip()
        if outputs.endswith(stop_str):
            outputs = outputs[:-len(stop_str)]
        outputs = outputs.strip()

        ans_id = shortuuid.uuid()
        ans_file.write(json.dumps({"question_id": idx,
                                   "prompt": cur_prompt,
                                   "text": outputs,
                                   "answer_id": ans_id,
                                   "model_id": model_name,
                                   "metadata": {}}) + "\n")
        # NOTE(review): per-answer flush is disabled, so answers only reach
        # disk at close(); a crash mid-run loses buffered output.
        # ans_file.flush()
    ans_file.close()
128
+
129
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Checkpoint to evaluate; --model-base only matters for delta/LoRA weights.
    parser.add_argument("--model-path", type=str, default="facebook/opt-350m")
    parser.add_argument("--model-base", type=str, default=None)
    parser.add_argument("--image-folder", type=str, default="")
    parser.add_argument("--question-file", type=str, default="tables/question.jsonl")
    parser.add_argument("--answers-file", type=str, default="answer.jsonl")
    # Conversation template key; read both here and (via the module global)
    # inside CustomDataset.__getitem__.
    parser.add_argument("--conv-mode", type=str, default="v0")
    # Sharding for parallel evaluation runs.
    parser.add_argument("--num-chunks", type=int, default=1)
    parser.add_argument("--chunk-idx", type=int, default=0)
    parser.add_argument("--temperature", type=float, default=0.2)
    parser.add_argument("--top_p", type=float, default=None)
    parser.add_argument("--num_beams", type=int, default=1)
    args = parser.parse_args()

    eval_model(args)
llava-phi/llava_phi/eval/model_vqa_mmbench.py ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import torch
3
+ import os
4
+ import json
5
+ import pandas as pd
6
+ from tqdm import tqdm
7
+ import shortuuid
8
+
9
+ from llava_phi.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
10
+ from llava_phi.conversation import conv_templates, SeparatorStyle
11
+ from llava_phi.model.builder import load_pretrained_model
12
+ from llava_phi.utils import disable_torch_init
13
+ from llava_phi.mm_utils import tokenizer_image_token, process_images, load_image_from_base64, get_model_name_from_path
14
+
15
+ from PIL import Image
16
+ import math
17
+
18
+
19
+ all_options = ['A', 'B', 'C', 'D']
20
+
21
+
22
def split_list(lst, n):
    """Split *lst* into ``n`` (roughly) equal-sized contiguous chunks.

    Chunk size is ``ceil(len(lst) / n)`` — ceiling division, not the integer
    division the old comment claimed. ``max(1, ...)`` prevents a zero
    ``range`` step (ValueError) on empty input, which instead yields an
    empty list of chunks.
    """
    chunk_size = max(1, math.ceil(len(lst) / n))  # ceiling division, never 0
    return [lst[i:i + chunk_size] for i in range(0, len(lst), chunk_size)]


def get_chunk(lst, n, k):
    """Return the ``k``-th (0-indexed) of ``n`` chunks of *lst*."""
    chunks = split_list(lst, n)
    return chunks[k]
31
+
32
+
33
def is_none(value):
    """Report whether *value* stands for a missing MMBench option.

    Treats Python ``None``, float NaN, and the strings ``"nan"`` / ``"none"``
    (case-insensitive) as missing; everything else counts as present.
    """
    if value is None:
        return True
    if isinstance(value, float):
        return math.isnan(value)
    if isinstance(value, str):
        return value.lower() in ('nan', 'none')
    return False
43
+
44
def get_options(row, options):
    """Collect option texts from *row* in letter order, stopping at the first
    missing entry (MMBench fills options contiguously from 'A')."""
    collected = []
    for letter in options:
        candidate = row[letter]
        if is_none(candidate):
            break
        collected.append(candidate)
    return collected
52
+
53
+
54
def eval_model(args):
    """Evaluate a checkpoint on MMBench (TSV with base64 images and A-D options).

    With ``--all-rounds``, each question is asked once per option with the
    option list rotated each round (circular-shift robustness check); answers
    are streamed to ``args.answers_file`` as JSON lines.
    """
    # Model
    disable_torch_init()
    model_path = os.path.expanduser(args.model_path)
    model_name = get_model_name_from_path(model_path)
    tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, args.model_base, model_name)

    # MMBench ships as a TSV; each row carries question, hint, options, and
    # a base64-encoded image.
    questions = pd.read_table(os.path.expanduser(args.question_file))
    questions = get_chunk(questions, args.num_chunks, args.chunk_idx)
    answers_file = os.path.expanduser(args.answers_file)
    os.makedirs(os.path.dirname(answers_file), exist_ok=True)
    ans_file = open(answers_file, "w")

    # Plain (pretrain-only) checkpoints were trained with mmtag-style prompts.
    if 'plain' in model_name and 'finetune' not in model_name.lower() and 'mmtag' not in args.conv_mode:
        args.conv_mode = args.conv_mode + '_mmtag'
        print(f'It seems that this is a plain model, but it is not using a mmtag prompt, auto switching to {args.conv_mode}.')

    for index, row in tqdm(questions.iterrows(), total=len(questions)):
        options = get_options(row, all_options)
        cur_option_char = all_options[:len(options)]

        if args.all_rounds:
            num_rounds = len(options)
        else:
            num_rounds = 1

        for round_idx in range(num_rounds):
            idx = row['index']
            question = row['question']
            hint = row['hint']
            image = load_image_from_base64(row['image'])
            if not is_none(hint):
                question = hint + '\n' + question
            # Append the (possibly rotated) options as "A. ...", "B. ...", etc.
            for option_char, option in zip(all_options[:len(options)], options):
                question = question + '\n' + option_char + '. ' + option
            qs = cur_prompt = question
            if model.config.mm_use_im_start_end:
                qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + qs
            else:
                qs = DEFAULT_IMAGE_TOKEN + '\n' + qs

            # Ask for a bare option letter (English or Chinese instruction).
            if args.single_pred_prompt:
                if args.lang == 'cn':
                    qs = qs + '\n' + "请直接回答选项字母。"
                else:
                    qs = qs + '\n' + "Answer with the option's letter from the given choices directly."

            conv = conv_templates[args.conv_mode].copy()
            conv.append_message(conv.roles[0], qs)
            conv.append_message(conv.roles[1], None)
            prompt = conv.get_prompt()

            input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda()

            image_tensor = process_images([image], image_processor, model.config)[0]
            # image_tensor = image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0]

            # Separator used to trim a trailing stop token from the decoding.
            stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2

            with torch.inference_mode():
                output_ids = model.generate(
                    input_ids,
                    images=image_tensor.unsqueeze(0).cuda(),
                    do_sample=True if args.temperature > 0 else False,
                    temperature=args.temperature,
                    top_p=args.top_p,
                    num_beams=args.num_beams,
                    # no_repeat_ngram_size=3,
                    eos_token_id=tokenizer.eos_token_id,  # End of sequence token
                    pad_token_id=tokenizer.eos_token_id,  # Pad token
                    max_new_tokens=1024,
                    use_cache=True
                )

            # generate() echoes the prompt; decode only the generated tail.
            input_token_len = input_ids.shape[1]
            n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item()
            if n_diff_input_output > 0:
                print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids')
            outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0]
            outputs = outputs.strip()
            if outputs.endswith(stop_str):
                outputs = outputs[:-len(stop_str)]
            outputs = outputs.strip()

            ans_id = shortuuid.uuid()
            ans_file.write(json.dumps({"question_id": idx,
                                       "round_id": round_idx,
                                       "prompt": cur_prompt,
                                       "text": outputs,
                                       "options": options,
                                       "option_char": cur_option_char,
                                       "answer_id": ans_id,
                                       "model_id": model_name,
                                       "metadata": {}}) + "\n")
            ans_file.flush()

            # rotate options
            options = options[1:] + options[:1]
            cur_option_char = cur_option_char[1:] + cur_option_char[:1]
    ans_file.close()
154
+
155
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model-path", type=str, default="facebook/opt-350m")
    parser.add_argument("--model-base", type=str, default=None)
    parser.add_argument("--image-folder", type=str, default="")
    parser.add_argument("--question-file", type=str, default="tables/question.jsonl")
    parser.add_argument("--answers-file", type=str, default="answer.jsonl")
    parser.add_argument("--conv-mode", type=str, default="llava_v1")
    # Sharding for parallel evaluation runs.
    parser.add_argument("--num-chunks", type=int, default=1)
    parser.add_argument("--chunk-idx", type=int, default=0)
    parser.add_argument("--temperature", type=float, default=0.2)
    parser.add_argument("--top_p", type=float, default=None)
    parser.add_argument("--num_beams", type=int, default=1)
    # Ask each question once per option ordering (circular-shift check).
    parser.add_argument("--all-rounds", action="store_true")
    # Instruct the model to reply with a bare option letter.
    parser.add_argument("--single-pred-prompt", action="store_true")
    parser.add_argument("--lang", type=str, default="en")
    args = parser.parse_args()

    eval_model(args)
llava-phi/llava_phi/eval/model_vqa_phi.py ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import torch
3
+ import os
4
+ import json
5
+ from tqdm import tqdm
6
+ import shortuuid
7
+
8
+ from llava_phi.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
9
+ from llava_phi.conversation import conv_templates, SeparatorStyle
10
+ from llava_phi.model.builder import load_pretrained_model
11
+ from llava_phi.utils import disable_torch_init
12
+ from llava_phi.mm_utils import tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria
13
+
14
+ from PIL import Image
15
+ import math
16
+
17
+
18
def split_list(lst, n):
    """Split *lst* into ``n`` (roughly) equal-sized contiguous chunks.

    Chunk size is ``ceil(len(lst) / n)`` — ceiling division, not the integer
    division the old comment claimed. ``max(1, ...)`` prevents a zero
    ``range`` step (ValueError) on empty input, which instead yields an
    empty list of chunks.
    """
    chunk_size = max(1, math.ceil(len(lst) / n))  # ceiling division, never 0
    return [lst[i:i + chunk_size] for i in range(0, len(lst), chunk_size)]


def get_chunk(lst, n, k):
    """Return the ``k``-th (0-indexed) of ``n`` chunks of *lst*."""
    chunks = split_list(lst, n)
    return chunks[k]
27
+
28
+
29
def eval_model(args):
    """Answer every question in a JSONL file with a LLaVA-Phi checkpoint.

    For each record ({"question_id", "image", "text"}), builds a
    conversation prompt with the image placeholder token, generates a reply,
    and appends it as one JSON line to ``args.answers_file``.

    Fixes over the previous revision: removed the ``[:10]`` debug slice that
    silently evaluated only the first 10 questions of the chunk, and the
    debug prints of the model and raw output ids; dropped the
    ``stopping_criteria`` local that was built but never passed to
    ``generate()``.
    """
    disable_torch_init()
    model_path = os.path.expanduser(args.model_path)
    model_name = get_model_name_from_path(model_path)
    tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, args.model_base, model_name)

    # One JSON object per line; shard per --num-chunks/--chunk-idx.
    questions = [json.loads(q) for q in open(os.path.expanduser(args.question_file), "r")]
    questions = get_chunk(questions, args.num_chunks, args.chunk_idx)
    answers_file = os.path.expanduser(args.answers_file)
    os.makedirs(os.path.dirname(answers_file), exist_ok=True)
    ans_file = open(answers_file, "w")
    for line in tqdm(questions):
        idx = line["question_id"]
        image_file = line["image"]
        qs = line["text"]
        cur_prompt = qs
        # Prepend the image placeholder, optionally wrapped in start/end markers.
        if model.config.mm_use_im_start_end:
            qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + qs
        else:
            qs = DEFAULT_IMAGE_TOKEN + '\n' + qs

        conv = conv_templates[args.conv_mode].copy()
        conv.append_message(conv.roles[0], qs)
        conv.append_message(conv.roles[1], None)
        prompt = conv.get_prompt()
        input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda()

        image = Image.open(os.path.join(args.image_folder, image_file))
        image_tensor = image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0]

        # Separator used to trim a trailing stop token from the decoded text.
        stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2

        with torch.inference_mode():
            output_ids = model.generate(
                input_ids,
                images=image_tensor.unsqueeze(0).cuda(),
                do_sample=True if args.temperature > 0 else False,
                temperature=args.temperature,
                top_p=args.top_p,
                num_beams=args.num_beams,
                # no_repeat_ngram_size=3,
                eos_token_id=tokenizer.eos_token_id,  # End of sequence token
                pad_token_id=tokenizer.eos_token_id,  # Pad token
                max_new_tokens=1024,
                use_cache=True)

        # generate() echoes the prompt; decode only the newly generated tail.
        input_token_len = input_ids.shape[1]
        n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item()
        if n_diff_input_output > 0:
            print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids')
        outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0]
        outputs = outputs.strip()
        if outputs.endswith(stop_str):
            outputs = outputs[:-len(stop_str)]
        outputs = outputs.strip()

        ans_id = shortuuid.uuid()
        ans_file.write(json.dumps({"question_id": idx,
                                   "image_id": image_file,
                                   "prompt": cur_prompt,
                                   "text": outputs,
                                   "answer_id": ans_id,
                                   "model_id": model_name,
                                   "metadata": {}}) + "\n")
        ans_file.flush()
    ans_file.close()
100
+
101
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model-path", type=str, default="facebook/opt-350m")
    parser.add_argument("--model-base", type=str, default=None)
    parser.add_argument("--image-folder", type=str, default="")
    parser.add_argument("--question-file", type=str, default="tables/question.jsonl")
    parser.add_argument("--answers-file", type=str, default="answer.jsonl")
    parser.add_argument("--conv-mode", type=str, default="llava_v1")
    # Sharding for parallel evaluation runs.
    parser.add_argument("--num-chunks", type=int, default=1)
    parser.add_argument("--chunk-idx", type=int, default=0)
    parser.add_argument("--temperature", type=float, default=0.2)
    parser.add_argument("--top_p", type=float, default=None)
    parser.add_argument("--num_beams", type=int, default=1)
    args = parser.parse_args()
    # Echo the run configuration for the log.
    print(args)

    eval_model(args)
llava-phi/llava_phi/eval/model_vqa_science.py ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import torch
3
+ import os
4
+ import json
5
+ from tqdm import tqdm
6
+ import shortuuid
7
+
8
+ from llava_phi.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
9
+ from llava_phi.conversation import conv_templates, SeparatorStyle
10
+ from llava_phi.model.builder import load_pretrained_model
11
+ from llava_phi.utils import disable_torch_init
12
+ from llava_phi.mm_utils import tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria
13
+
14
+ from PIL import Image
15
+ import math
16
+
17
+
18
def split_list(lst, n):
    """Split *lst* into ``n`` (roughly) equal-sized contiguous chunks.

    Chunk size is ``ceil(len(lst) / n)`` — ceiling division, not the integer
    division the old comment claimed. ``max(1, ...)`` prevents a zero
    ``range`` step (ValueError) on empty input, which instead yields an
    empty list of chunks.
    """
    chunk_size = max(1, math.ceil(len(lst) / n))  # ceiling division, never 0
    return [lst[i:i + chunk_size] for i in range(0, len(lst), chunk_size)]


def get_chunk(lst, n, k):
    """Return the ``k``-th (0-indexed) of ``n`` chunks of *lst*."""
    chunks = split_list(lst, n)
    return chunks[k]
27
+
28
+
29
def eval_model(args):
    """Evaluate a checkpoint on ScienceQA-format data (a JSON list of
    conversation records, image optional).

    With ``--answer-prompter`` a second generation pass appends
    ``###\\nANSWER:`` after the model's reasoning to elicit a short final
    answer. Answers stream to ``args.answers_file`` as JSON lines.
    """
    # Model
    disable_torch_init()
    model_path = os.path.expanduser(args.model_path)
    model_name = get_model_name_from_path(model_path)
    tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, args.model_base, model_name)

    # ScienceQA uses one JSON array (not JSONL).
    questions = json.load(open(os.path.expanduser(args.question_file), "r"))
    questions = get_chunk(questions, args.num_chunks, args.chunk_idx)
    answers_file = os.path.expanduser(args.answers_file)
    os.makedirs(os.path.dirname(answers_file), exist_ok=True)
    ans_file = open(answers_file, "w")
    for i, line in enumerate(tqdm(questions)):
        idx = line["id"]
        # First conversation turn holds the question; strip the inline
        # <image> marker — the placeholder is re-added below if needed.
        question = line['conversations'][0]
        qs = question['value'].replace('<image>', '').strip()
        cur_prompt = qs

        # Some ScienceQA questions are text-only; images stays None then.
        if 'image' in line:
            image_file = line["image"]
            image = Image.open(os.path.join(args.image_folder, image_file))
            image_tensor = image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
            images = image_tensor.unsqueeze(0).cuda()
            if getattr(model.config, 'mm_use_im_start_end', False):
                qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + qs
            else:
                qs = DEFAULT_IMAGE_TOKEN + '\n' + qs
            cur_prompt = '<image>' + '\n' + cur_prompt
        else:
            images = None

        if args.single_pred_prompt:
            qs = qs + '\n' + "Answer with the option's letter from the given choices directly."
            cur_prompt = cur_prompt + '\n' + "Answer with the option's letter from the given choices directly."

        conv = conv_templates[args.conv_mode].copy()
        conv.append_message(conv.roles[0], qs)
        conv.append_message(conv.roles[1], None)
        prompt = conv.get_prompt()

        input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda()

        stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
        keywords = [stop_str]
        # NOTE(review): built but never passed to generate() (argument is
        # commented out below) — currently dead weight.
        stopping_criteria = [KeywordsStoppingCriteria(keywords, tokenizer, input_ids)] if conv.version == "v0" else None

        with torch.inference_mode():
            output_ids = model.generate(
                input_ids,
                images=images,
                do_sample=True if args.temperature > 0 else False,
                temperature=args.temperature,
                max_new_tokens=1024,
                eos_token_id=tokenizer.eos_token_id,  # End of sequence token
                pad_token_id=tokenizer.eos_token_id,  # Pad token
                use_cache=True
                # stopping_criteria=stopping_criteria,
            )

        # generate() echoes the prompt; decode only the generated tail.
        input_token_len = input_ids.shape[1]
        n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item()
        if n_diff_input_output > 0:
            print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids')
        outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0]
        outputs = outputs.strip()
        if outputs.endswith(stop_str):
            outputs = outputs[:-len(stop_str)]
        outputs = outputs.strip()

        # prompt for answer
        # Second pass: feed the reasoning back and ask for the final answer.
        if args.answer_prompter:
            outputs_reasoning = outputs
            input_ids = tokenizer_image_token(prompt + outputs_reasoning + ' ###\nANSWER:', tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda()

            with torch.inference_mode():
                output_ids = model.generate(
                    input_ids,
                    images=images,
                    do_sample=True if args.temperature > 0 else False,
                    temperature=args.temperature,
                    max_new_tokens=64,
                    eos_token_id=tokenizer.eos_token_id,  # End of sequence token
                    pad_token_id=tokenizer.eos_token_id,  # Pad token
                    use_cache=True
                    # stopping_criteria=[stopping_criteria]
                )

            input_token_len = input_ids.shape[1]
            n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item()
            if n_diff_input_output > 0:
                print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids')
            outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0]
            outputs = outputs.strip()
            if outputs.endswith(stop_str):
                outputs = outputs[:-len(stop_str)]
            outputs = outputs.strip()
            outputs = outputs_reasoning + '\n The answer is ' + outputs

        ans_id = shortuuid.uuid()
        ans_file.write(json.dumps({"question_id": idx,
                                   "prompt": cur_prompt,
                                   "text": outputs,
                                   "answer_id": ans_id,
                                   "model_id": model_name,
                                   "metadata": {}}) + "\n")
        ans_file.flush()
    ans_file.close()
+ ans_file.close()
136
+
137
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model-path", type=str, default="facebook/opt-350m")
    parser.add_argument("--model-base", type=str, default=None)
    parser.add_argument("--image-folder", type=str, default="")
    # ScienceQA uses a single JSON array, unlike the JSONL of other evals.
    parser.add_argument("--question-file", type=str, default="tables/question.json")
    parser.add_argument("--answers-file", type=str, default="answer.jsonl")
    parser.add_argument("--conv-mode", type=str, default="llava_v0")
    # Sharding for parallel evaluation runs.
    parser.add_argument("--num-chunks", type=int, default=1)
    parser.add_argument("--chunk-idx", type=int, default=0)
    parser.add_argument("--temperature", type=float, default=0.2)
    # Two-pass mode: re-prompt with "###\nANSWER:" after the reasoning.
    parser.add_argument("--answer-prompter", action="store_true")
    # Instruct the model to reply with a bare option letter.
    parser.add_argument("--single-pred-prompt", action="store_true")
    args = parser.parse_args()

    eval_model(args)
llava-phi/llava_phi/eval/qa_baseline_gpt35.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Generate answers with GPT-3.5"""
2
+ # Note: you need to be using OpenAI Python v0.27.0 for the code below to work
3
+ import argparse
4
+ import json
5
+ import os
6
+ import time
7
+ import concurrent.futures
8
+
9
+ import openai
10
+ import tqdm
11
+ import shortuuid
12
+
13
+ MODEL = 'gpt-3.5-turbo'
14
+ MODEL_ID = 'gpt-3.5-turbo:20230327'
15
+
16
def get_answer(question_id: int, question: str, max_tokens: int):
    """Ask gpt-3.5-turbo a single question, retrying up to 3 times.

    Returns an answer dict with ``answer_id``/``question_id``/``model_id``
    and a ``text`` field holding either the reply or ``'#ERROR#'`` if every
    attempt failed. Uses the legacy ``openai<1.0`` ChatCompletion API.
    """
    ans = {
        'answer_id': shortuuid.uuid(),
        'question_id': question_id,
        'model_id': MODEL_ID,
    }
    # Simple fixed-backoff retry: up to 3 attempts, sleeping 1 s per failure.
    for _ in range(3):
        try:
            response = openai.ChatCompletion.create(
                model=MODEL,
                messages=[{
                    'role': 'system',
                    'content': 'You are a helpful assistant.'
                }, {
                    'role': 'user',
                    'content': question,
                }],
                max_tokens=max_tokens,
            )
            ans['text'] = response['choices'][0]['message']['content']
            return ans
        except Exception as e:
            # Best-effort: log and retry; the '#ERROR#' marker survives if
            # this was the last attempt.
            print('[ERROR]', e)
            ans['text'] = '#ERROR#'
            time.sleep(1)
    return ans
42
+
43
+
44
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='ChatGPT answer generation.')
    parser.add_argument('-q', '--question')
    parser.add_argument('-o', '--output')
    parser.add_argument('--max-tokens', type=int, default=1024, help='maximum number of tokens produced in the output')
    args = parser.parse_args()

    # Load questions keyed by id from the JSONL question file.
    questions_dict = {}
    with open(os.path.expanduser(args.question)) as f:
        for line in f:
            if not line:
                continue
            q = json.loads(line)
            questions_dict[q['question_id']] = q['text']

    answers = []

    # Fan the API calls out over a thread pool; each future yields one
    # answer dict (I/O-bound, so threads suffice).
    with concurrent.futures.ThreadPoolExecutor(max_workers=32) as executor:
        futures = []
        for qid, question in questions_dict.items():
            future = executor.submit(get_answer, qid, question, args.max_tokens)
            futures.append(future)

        for future in tqdm.tqdm(concurrent.futures.as_completed(futures), total=len(futures)):
            answers.append(future.result())

    # Completion order is nondeterministic; restore question-id order
    # before writing the JSONL output.
    answers.sort(key=lambda x: x['question_id'])

    with open(os.path.expanduser(args.output), 'w') as f:
        table = [json.dumps(ans) for ans in answers]
        f.write('\n'.join(table))
llava-phi/llava_phi/eval/run_llava_phi.py ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import torch
3
+
4
+ from llava_phi.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
5
+ from llava_phi.conversation import conv_templates, SeparatorStyle
6
+ from llava_phi.model.builder import load_pretrained_model
7
+ from llava_phi.utils import disable_torch_init
8
+ from llava_phi.mm_utils import tokenizer_image_token, get_model_name_from_path
9
+
10
+ from PIL import Image
11
+
12
+ import requests
13
+ from PIL import Image
14
+ from io import BytesIO
15
+
16
+
17
def load_image(image_file):
    """Return an RGB PIL image loaded from *image_file* (local path or URL)."""
    is_remote = image_file.startswith('http')  # covers both http:// and https://
    if is_remote:
        payload = requests.get(image_file).content
        return Image.open(BytesIO(payload)).convert('RGB')
    return Image.open(image_file).convert('RGB')
24
+
25
+
26
def eval_model(args):
    """Single-shot CLI demo: answer one query about one image and print it."""
    # Model
    disable_torch_init()

    model_name = get_model_name_from_path(args.model_path)
    tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name)

    # Prepend the image placeholder, optionally wrapped in start/end markers.
    qs = args.query
    if model.config.mm_use_im_start_end:
        qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + qs
    else:
        qs = DEFAULT_IMAGE_TOKEN + '\n' + qs

    # Infer the conversation template from the checkpoint name; an explicit
    # --conv-mode wins (with a warning on mismatch).
    if 'phi' in model_name.lower():
        conv_mode = "phi-2_v0"
    else:
        conv_mode = "default"

    if args.conv_mode is not None and conv_mode != args.conv_mode:
        print('[WARNING] the auto inferred conversation mode is {}, while `--conv-mode` is {}, using {}'.format(conv_mode, args.conv_mode, args.conv_mode))
    else:
        args.conv_mode = conv_mode

    conv = conv_templates[args.conv_mode].copy()
    conv.append_message(conv.roles[0], qs)
    conv.append_message(conv.roles[1], None)
    prompt = conv.get_prompt()

    image = load_image(args.image_file)
    image_tensor = image_processor.preprocess(image, return_tensors='pt')['pixel_values'].cuda()

    input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda()

    # Separator used to trim a trailing stop token from the decoded text.
    stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2

    with torch.inference_mode():
        output_ids = model.generate(
            input_ids,
            images=image_tensor,
            do_sample=True,
            temperature=0.2,
            max_new_tokens=1024,
            eos_token_id=tokenizer.eos_token_id,  # End of sequence token
            pad_token_id=tokenizer.eos_token_id,  # Pad token
            use_cache=True,
        )

    # generate() echoes the prompt; decode only the newly generated tail.
    input_token_len = input_ids.shape[1]
    n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item()
    if n_diff_input_output > 0:
        print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids')
    outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0]
    outputs = outputs.strip()
    if outputs.endswith(stop_str):
        outputs = outputs[:-len(stop_str)]
    outputs = outputs.strip()
    print(outputs)
83
+
84
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model-path", type=str, default="facebook/opt-350m")
    parser.add_argument("--model-base", type=str, default=None)
    # Single image + single free-form query for the demo run.
    parser.add_argument("--image-file", type=str, required=True)
    parser.add_argument("--query", type=str, required=True)
    # None lets eval_model auto-infer the template from the model name.
    parser.add_argument("--conv-mode", type=str, default=None)
    args = parser.parse_args()

    eval_model(args)
llava-phi/llava_phi/eval/summarize_gpt_review.py ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+ from collections import defaultdict
4
+
5
+ import numpy as np
6
+
7
+ import argparse
8
+
9
def parse_args():
    """Build and parse the CLI options for the GPT review summarizer."""
    cli = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.')
    cli.add_argument('-d', '--dir', default=None)
    cli.add_argument('-v', '--version', default=None)
    cli.add_argument('-s', '--select', nargs='*', default=None)
    cli.add_argument('-f', '--files', nargs='*', default=[])
    cli.add_argument('-i', '--ignore', nargs='*', default=[])
    return cli.parse_args()
17
+
18
+
19
if __name__ == '__main__':
    args = parse_args()

    # Question ids listed via -i/--ignore are excluded from aggregation.
    if args.ignore is not None:
        args.ignore = [int(x) for x in args.ignore]

    # Explicit -f/--files wins; otherwise scan --dir for review .jsonl files.
    if len(args.files) > 0:
        review_files = args.files
    else:
        review_files = [x for x in os.listdir(args.dir) if x.endswith('.jsonl') and (x.startswith('gpt4_text') or x.startswith('reviews_') or x.startswith('review_') or 'review' in args.dir)]

    for review_file in sorted(review_files):
        # Config name is the file stem: gpt4_text_<config>.jsonl -> <config>.
        config = os.path.basename(review_file).replace('gpt4_text_', '').replace('.jsonl', '')
        # -s/--select keeps only configs containing every selected substring.
        if args.select is not None and any(x not in config for x in args.select):
            continue
        # GPT-4 snapshot version is inferred from the config name.
        if '0613' in config:
            version = '0613'
        else:
            version = '0314'
        if args.version is not None and args.version != version:
            continue
        scores = defaultdict(list)
        print(config)
        with open(os.path.join(args.dir, review_file) if args.dir is not None else review_file) as f:
            for review_str in f:
                review = json.loads(review_str)
                if review['question_id'] in args.ignore:
                    continue
                if 'category' in review:
                    # Per-category bucket plus a global 'all' bucket.
                    scores[review['category']].append(review['tuple'])
                    scores['all'].append(review['tuple'])
                else:
                    if 'tuple' in review:
                        scores['all'].append(review['tuple'])
                    else:
                        # NOTE(review): scalar 'score' entries would make the
                        # column means below 0-d, so stats[1] indexing would
                        # fail — confirm this path only carries score pairs.
                        scores['all'].append(review['score'])
        for k, v in sorted(scores.items()):
            # stats = [assistant1_mean, assistant2_mean]; report the relative
            # score (%) and both means rescaled to 0-100.
            stats = np.asarray(v).mean(0).tolist()
            stats = [round(x, 3) for x in stats]
            # print(k, stats, round(stats[1]/stats[0]*100, 1))
            print(k, round(stats[1]/stats[0]*100, 1), round(stats[0] * 10, 1), round(stats[1] * 10, 1))
        print('=================================')
llava-phi/llava_phi/eval/table/rule.json ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "coding": {"role": "Assistant", "prompt": "Your task is to evaluate the coding abilities of the above two assistants. They have been asked to implement a program to solve a given problem. Please review their code submissions, paying close attention to their problem-solving approach, code structure, readability, and the inclusion of helpful comments.\n\nPlease ensure that the assistants' submissions:\n\n1. Correctly implement the given problem statement.\n2. Contain accurate and efficient code.\n3. Include clear and concise comments that explain the code's logic and functionality.\n4. Adhere to proper coding standards and best practices.\n\nOnce you have carefully reviewed both submissions, provide detailed feedback on their strengths and weaknesses, along with any suggestions for improvement. You should first output a single line containing two scores on the scale of 1-10 (1: no code/no sense; 10: perfect) for Assistant 1 and 2, respectively. Then give extra comments starting from the next line."},
3
+ "math": {"role": "Assistant", "prompt": "We would like to request your feedback on the mathematical proficiency of two AI assistants regarding the given user question.\nFirstly, please solve the problem independently, without referring to the answers provided by Assistant 1 and Assistant 2.\nAfterward, please examine the problem-solving process of Assistant 1 and Assistant 2 step-by-step to ensure their correctness, identifying any incorrect steps if present. Your evaluation should take into account not only the answer but also the problem-solving steps.\nFinally, please output a Python tuple containing two numerical scores for Assistant 1 and Assistant 2, ranging from 1 to 10, respectively. If applicable, explain the reasons for any variations in their scores and determine which assistant performed better."},
4
+ "default": {"role": "Assistant", "prompt": "We would like to request your feedback on the performance of two AI assistants in response to the user question displayed above.\nPlease rate the helpfulness, relevance, accuracy, level of details of their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\nPlease first output a single line containing only two values indicating the scores for Assistant 1 and 2, respectively. The two scores are separated by a space.\nIn the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment."},
5
+ "conv": {"role": "Assistant", "prompt": "We would like to request your feedback on the performance of two AI assistants in response to the user question displayed above. The user asks the question on observing an image. For your reference, the visual content in the image is represented with five descriptive sentences describing the same image and the bounding box coordinates of each object in the scene. These coordinates are in the form of bounding boxes, represented as (x1, y1, x2, y2) with floating numbers ranging from 0 to 1. These values correspond to the top left x, top left y, bottom right x, and bottom right y. \nPlease rate the helpfulness, relevance, accuracy, level of details of their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\nPlease first output a single line containing only two values indicating the scores for Assistant 1 and 2, respectively. The two scores are separated by a space.\nIn the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment."},
6
+ "detail": {"role": "Assistant", "prompt": "We would like to request your feedback on the performance of two AI assistants in response to the user question displayed above. The user asks the question on observing an image. For your reference, the visual content in the image is represented with five descriptive sentences describing the same image and the bounding box coordinates of each object in the scene. These coordinates are in the form of bounding boxes, represented as (x1, y1, x2, y2) with floating numbers ranging from 0 to 1. These values correspond to the top left x, top left y, bottom right x, and bottom right y. \nPlease rate the helpfulness, relevance, accuracy, level of details of their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\nPlease first output a single line containing only two values indicating the scores for Assistant 1 and 2, respectively. The two scores are separated by a space.\nIn the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment."},
7
+ "complex": {"role": "Assistant", "prompt": "We would like to request your feedback on the performance of two AI assistants in response to the user question displayed above. The user asks the question on observing an image. For your reference, the visual content in the image is represented with five descriptive sentences describing the same image and the bounding box coordinates of each object in the scene. These coordinates are in the form of bounding boxes, represented as (x1, y1, x2, y2) with floating numbers ranging from 0 to 1. These values correspond to the top left x, top left y, bottom right x, and bottom right y. \nPlease rate the helpfulness, relevance, accuracy, level of details of their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\nPlease first output a single line containing only two values indicating the scores for Assistant 1 and 2, respectively. The two scores are separated by a space.\nIn the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment."},
8
+ "llava_bench_conv": {"role": "Assistant", "prompt": "We would like to request your feedback on the performance of two AI assistants in response to the user question displayed above. The user asks the question on observing an image. For your reference, the visual content in the image is represented with a few sentences describing the image. \nPlease rate the helpfulness, relevance, accuracy, level of details of their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\nPlease first output a single line containing only two values indicating the scores for Assistant 1 and 2, respectively. The two scores are separated by a space.\nIn the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment."},
9
+ "llava_bench_detail": {"role": "Assistant", "prompt": "We would like to request your feedback on the performance of two AI assistants in response to the user question displayed above. The user asks the question on observing an image. For your reference, the visual content in the image is represented with a few sentences describing the image. \nPlease rate the helpfulness, relevance, accuracy, level of details of their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\nPlease first output a single line containing only two values indicating the scores for Assistant 1 and 2, respectively. The two scores are separated by a space.\nIn the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment."},
10
+ "llava_bench_complex": {"role": "Assistant", "prompt": "We would like to request your feedback on the performance of two AI assistants in response to the user question displayed above. The user asks the question on observing an image. For your reference, the visual content in the image is represented with a few sentences describing the image. \nPlease rate the helpfulness, relevance, accuracy, level of details of their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\nPlease first output a single line containing only two values indicating the scores for Assistant 1 and 2, respectively. The two scores are separated by a space.\nIn the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment."}
11
+ }
llava-phi/llava_phi/mm_utils.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from PIL import Image
2
+ from io import BytesIO
3
+ import base64
4
+
5
+ import torch
6
+ from transformers import StoppingCriteria
7
+ from llava_phi.constants import IMAGE_TOKEN_INDEX
8
+
9
+
10
def load_image_from_base64(image):
    """Decode a base64-encoded image string into a PIL Image."""
    raw = base64.b64decode(image)
    return Image.open(BytesIO(raw))
12
+
13
+
14
def expand2square(pil_img, background_color):
    """Pad *pil_img* to a square canvas filled with *background_color*.

    The image is centered along its shorter dimension; an already-square
    image is returned unchanged.
    """
    width, height = pil_img.size
    if width == height:
        return pil_img
    side = max(width, height)
    canvas = Image.new(pil_img.mode, (side, side), background_color)
    if width > height:
        canvas.paste(pil_img, (0, (side - height) // 2))
    else:
        canvas.paste(pil_img, ((side - width) // 2, 0))
    return canvas
26
+
27
+
28
def process_images(images, image_processor, model_cfg):
    """Preprocess a list of PIL images into model-ready pixel tensors.

    When model_cfg requests 'pad' aspect handling, each image is padded to a
    square (filled with the processor's mean color) before preprocessing;
    otherwise the processor is applied to the whole batch directly.
    """
    aspect_mode = getattr(model_cfg, "image_aspect_ratio", None)
    if aspect_mode != 'pad':
        return image_processor(images, return_tensors='pt')['pixel_values']

    pad_color = tuple(int(c * 255) for c in image_processor.image_mean)
    processed = [
        image_processor.preprocess(expand2square(img, pad_color), return_tensors='pt')['pixel_values'][0]
        for img in images
    ]
    # Stack into one batch tensor only when every image has the same shape.
    if all(t.shape == processed[0].shape for t in processed):
        processed = torch.stack(processed, dim=0)
    return processed
41
+
42
+
43
def tokenizer_image_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None):
    """Tokenize *prompt*, substituting each '<image>' marker with image_token_index.

    Splits the prompt on '<image>', tokenizes the text pieces separately, and
    interleaves the image token id between them.  A leading BOS token (if the
    tokenizer emits one) is kept exactly once at the front.
    """
    chunks = [tokenizer(piece).input_ids for piece in prompt.split('<image>')]

    # Detect and preserve a single leading BOS token.
    offset = 0
    token_ids = []
    if chunks and chunks[0] and chunks[0][0] == tokenizer.bos_token_id:
        offset = 1
        token_ids.append(chunks[0][0])

    # Interleave text chunks with the image-token separator, dropping the
    # trailing separator, then strip the per-chunk BOS prefix while extending.
    separator = [image_token_index] * (offset + 1)
    interleaved = []
    for piece in chunks:
        interleaved.append(piece)
        interleaved.append(separator)
    for piece in interleaved[:-1]:
        token_ids.extend(piece[offset:])

    if return_tensors is None:
        return token_ids
    if return_tensors == 'pt':
        return torch.tensor(token_ids, dtype=torch.long)
    raise ValueError(f'Unsupported tensor type: {return_tensors}')
62
+
63
+
64
def get_model_name_from_path(model_path):
    """Derive a short model name from a filesystem path or HF repo id.

    Checkpoint directories are disambiguated by prefixing the parent
    directory name (e.g. 'llava/checkpoint-100' -> 'llava_checkpoint-100').
    """
    parts = model_path.strip("/").split("/")
    if parts[-1].startswith('checkpoint-'):
        return f"{parts[-2]}_{parts[-1]}"
    return parts[-1]
71
+
72
+
73
class KeywordsStoppingCriteria(StoppingCriteria):
    """Stop generation once any keyword appears in the newly generated tokens.

    Matching is attempted first on exact token-id suffixes, then on the
    decoded text of the last few generated tokens (tokenizations of a keyword
    can differ depending on surrounding context).
    """

    def __init__(self, keywords, tokenizer, input_ids):
        self.keywords = keywords
        self.keyword_ids = []
        for keyword in keywords:
            cur_keyword_ids = tokenizer(keyword).input_ids
            # Drop a leading BOS token so keyword ids can match mid-sequence.
            if len(cur_keyword_ids) > 1 and cur_keyword_ids[0] == tokenizer.bos_token_id:
                cur_keyword_ids = cur_keyword_ids[1:]
            self.keyword_ids.append(torch.tensor(cur_keyword_ids))
        self.tokenizer = tokenizer
        # Prompt length: generation starts after this many tokens.
        self.start_len = input_ids.shape[1]

    def __call__(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        assert output_ids.shape[0] == 1, "Only support batch size 1 (yet)"  # TODO
        # Only decode the last few generated tokens for the textual fallback.
        offset = min(output_ids.shape[1] - self.start_len, 3)
        self.keyword_ids = [keyword_id.to(output_ids.device) for keyword_id in self.keyword_ids]
        for keyword_id in self.keyword_ids:
            n = keyword_id.shape[0]
            # Fix: the original `if output_ids[0, -n:] == keyword_id:` performs
            # an elementwise comparison, and `if` on a multi-element bool
            # tensor raises a RuntimeError.  Compare the whole suffix with
            # torch.equal, and skip keywords longer than the sequence so the
            # negative slice cannot silently shrink.
            if output_ids.shape[1] >= n and torch.equal(output_ids[0, -n:], keyword_id):
                return True
        outputs = self.tokenizer.batch_decode(output_ids[:, -offset:], skip_special_tokens=True)[0]
        for keyword in self.keywords:
            if keyword in outputs:
                return True
        return False
llava-phi/llava_phi/model/__init__.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ from .language_model.llava_phi import LlavaPhiForCausalLM
2
+ from .language_model.configuration_llava_phi import LlavaPhiConfig, LlavaPhiVisionConfig, ProjectorConfig
llava-phi/llava_phi/model/builder.py ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import warnings
3
+ import shutil
4
+
5
+ from transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig, BitsAndBytesConfig, CLIPImageProcessor
6
+ import torch
7
+ from llava_phi.model import *
8
+ from llava_phi.constants import DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
9
+
10
+
11
def load_pretrained_model(model_path, model_base, model_name, load_8bit=False, load_4bit=False, device_map="cuda", device="cuda"):
    """Load a LLaVA-Phi checkpoint (full, LoRA, or projector-only) or a plain LM.

    Args:
        model_path: checkpoint directory or HF repo id to load.
        model_base: base model to merge onto for LoRA / projector-only
            checkpoints; ignored for full checkpoints.
        model_name: used to dispatch loading ('phi' and 'lora' substrings).
        load_8bit / load_4bit: bitsandbytes quantized loading.
        device_map: passed through to from_pretrained.
        device: NOTE(review): accepted but unused — the model is always moved
            to "cuda" at the end; confirm intent.

    Returns:
        (tokenizer, model, image_processor, context_len)
    """
    kwargs = {"device_map": device_map}
    if load_8bit:
        kwargs['load_in_8bit'] = True
    elif load_4bit:
        kwargs['load_in_4bit'] = True
        kwargs['quantization_config'] = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_compute_dtype=torch.float16,
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type='nf4'
        )
    # else: # TODO: after fine-tuning LLava-Phi, load the model weights with fp16 will pose nan
    #     kwargs['torch_dtype'] = torch.float16

    if 'phi' in model_name.lower():
        # Load LLaVA-Phi model
        if 'lora' in model_name.lower() and model_base is None:
            warnings.warn('There is `lora` in model name but no `model_base` is provided. If you are loading a LoRA model, please provide the `model_base` argument.')
        if 'lora' in model_name.lower() and model_base is not None:
            # LoRA checkpoint: load base weights, patch extra (non-LoRA)
            # trainables, then merge the adapter in.
            lora_cfg_pretrained = AutoConfig.from_pretrained(model_path)
            tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
            print('Loading LLaVA-Phi from base model...')
            model = LlavaPhiForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=lora_cfg_pretrained, **kwargs)
            token_num, tokem_dim = model.lm_head.out_features, model.lm_head.in_features
            # Re-create head/embeddings if the checkpoint's vocab size differs
            # from the base model's (weights are filled by load_state_dict below).
            if model.lm_head.weight.shape[0] != token_num:
                model.lm_head.weight = torch.nn.Parameter(torch.empty(token_num, tokem_dim, device=model.device, dtype=model.dtype))
                model.model.embed_tokens.weight = torch.nn.Parameter(torch.empty(token_num, tokem_dim, device=model.device, dtype=model.dtype))

            print('Loading additional LLaVA-Phi weights...')
            if os.path.exists(os.path.join(model_path, 'non_lora_trainables.bin')):
                non_lora_trainables = torch.load(os.path.join(model_path, 'non_lora_trainables.bin'), map_location='cpu')
            else:
                # this is probably from HF Hub
                from huggingface_hub import hf_hub_download
                def load_from_hf(repo_id, filename, subfolder=None):
                    cache_file = hf_hub_download(
                        repo_id=repo_id,
                        filename=filename,
                        subfolder=subfolder)
                    return torch.load(cache_file, map_location='cpu')
                non_lora_trainables = load_from_hf(model_path, 'non_lora_trainables.bin')
            # Strip PEFT's 'base_model.' / 'model.' prefixes from state-dict keys.
            non_lora_trainables = {(k[11:] if k.startswith('base_model.') else k): v for k, v in non_lora_trainables.items()}
            if any(k.startswith('model.model.') for k in non_lora_trainables):
                non_lora_trainables = {(k[6:] if k.startswith('model.') else k): v for k, v in non_lora_trainables.items()}
            model.load_state_dict(non_lora_trainables, strict=False)

            from peft import PeftModel
            print('Loading LoRA weights...')
            model = PeftModel.from_pretrained(model, model_path)
            print('Merging LoRA weights...')
            model = model.merge_and_unload()
            print('Model is loaded...')
        elif model_base is not None:
            # this may be mm projector only
            print('Loading LLaVA-Phi from base model...')
            tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
            cfg_pretrained = AutoConfig.from_pretrained(model_path)
            model = LlavaPhiForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs)

            # Overlay only the multimodal projector weights onto the base model.
            mm_projector_weights = torch.load(os.path.join(model_path, 'mm_projector.bin'), map_location='cpu')
            mm_projector_weights = {k: v.to(torch.float16) for k, v in mm_projector_weights.items()}
            model.load_state_dict(mm_projector_weights, strict=False)
        else:
            # Full, self-contained LLaVA-Phi checkpoint.
            print("load llaVA-Phi MLLM!!!")
            config = LlavaPhiConfig.from_pretrained(model_path, trust_remote_code=True)
            tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)
            model = LlavaPhiForCausalLM.from_pretrained(
                model_path,
                config=config,
                use_safetensors=True,
                **kwargs).to("cuda")
    else:
        # Load language model
        if model_base is not None:
            # PEFT model
            from peft import PeftModel
            tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
            model = AutoModelForCausalLM.from_pretrained(model_base, torch_dtype=torch.float16, low_cpu_mem_usage=True, device_map="auto")
            print(f"Loading LoRA weights from {model_path}")
            model = PeftModel.from_pretrained(model, model_path)
            print(f"Merging weights")
            model = model.merge_and_unload()
            print('Convert to FP16...')
            model.to(torch.float16)
        else:
            tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
            model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)

    image_processor = CLIPImageProcessor.from_pretrained(model_path)

    if 'phi' in model_name.lower():
        mm_use_im_start_end = getattr(model.config, "mm_use_im_start_end", False)
        mm_use_im_patch_token = getattr(model.config, "mm_use_im_patch_token", True)

        # TODO: the tokenizer length of phi-2 is 50295, but the output class of lm_head is 51200
        if mm_use_im_patch_token:
            tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
        if mm_use_im_start_end:
            tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)
        # model.resize_token_embeddings(len(tokenizer))
    else:
        raise ValueError(f"Unsupported model name: {model_name}")

    # Context window: fall back to 2048 when the config does not declare one.
    if hasattr(model.config, "max_sequence_length"):
        context_len = model.config.max_sequence_length
    else:
        context_len = 2048
    model.to(device="cuda")
    print(kwargs)
    return tokenizer, model, image_processor, context_len
llava-phi/llava_phi/model/language_model/configuration_llava_phi.py ADDED
@@ -0,0 +1,179 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from typing import Union
3
+ from transformers import PretrainedConfig, PhiConfig
4
+ from transformers.utils import logging
5
+
6
+ logger = logging.get_logger(__name__)
7
+
8
+
9
class LlavaPhiVisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`CLIPVisionModel`]. It is used to instantiate a
    CLIP vision encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the vision encoder of the CLIP
    [openai/clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        projection_dim (`int`, *optional*, defaults to 512):
            Dimensionality of text and vision projection layers.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 32):
            The size (resolution) of each patch.
        hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_factor (`float`, *optional*, defaults to 1.0):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).
        mm_vision_select_feature (`str`, *optional*, defaults to `"patch"`):
            The feature to select from the vision encoder output. Can be one of `"patch"` or `"cls_patch"`.
        mm_vision_select_layer (`int`, *optional*, defaults to `-2`):
            The layer to select from the vision encoder output.

    Example:

    ```python
    >>> from transformers import CLIPVisionConfig, CLIPVisionModel

    >>> # Initializing a CLIPVisionConfig with openai/clip-vit-base-patch32 style configuration
    >>> configuration = CLIPVisionConfig()

    >>> # Initializing a CLIPVisionModel (with random weights) from the openai/clip-vit-base-patch32 style configuration
    >>> model = CLIPVisionModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "llava_phi_clip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        mm_vision_select_feature="patch",
        mm_vision_select_layer=-2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.mm_vision_select_feature = mm_vision_select_feature
        self.mm_vision_select_layer = mm_vision_select_layer

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load this vision sub-config, unwrapping it from a composite config if needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from CLIPConfig
        # NOTE(review): the composite config's model_type is "llava_phi"
        # (see LlavaPhiConfig below), so this "llava_phi-phi" comparison may
        # never match — confirm which model_type composite checkpoints carry.
        if config_dict.get("model_type") == "llava_phi-phi":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
124
+
125
+
126
class ProjectorConfig(PretrainedConfig):
    """Configuration for the multimodal projector mapping vision features
    (mm_hidden_size) into the language model's embedding space (hidden_size).
    """

    model_type = "llava_phi_projector"

    def __init__(
        self,
        mm_projector_type="linear",
        mm_hidden_size=768,
        hidden_size=2560,
        **kwargs
    ):
        self.mm_projector_type = mm_projector_type
        self.mm_hidden_size = mm_hidden_size
        self.hidden_size = hidden_size
        super().__init__(**kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load this projector sub-config, unwrapping it from a composite config if needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from CLIPConfig
        # NOTE(review): the composite config's model_type is "llava_phi", so
        # this "llava_phi-phi" comparison may never match — confirm.
        if config_dict.get("model_type") == "llava_phi-phi":
            config_dict = config_dict["projector_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
158
+
159
+
160
# Default nested vision settings used when a LlavaPhiConfig is constructed
# without an explicit `vision_config` (CLIP-style tower + linear projector).
DEFAULT_VISUAL_CONFIG = {
    "vision_tower": LlavaPhiVisionConfig().to_dict(),
    "mm_projector": ProjectorConfig().to_dict()
}
164
+
165
+
166
class LlavaPhiConfig(PhiConfig):
    """PhiConfig extended with a nested `vision_config` dict holding the
    vision-tower and multimodal-projector settings."""

    model_type = "llava_phi"

    def __init__(self, vision_config=None, **kwargs):
        # Fall back to the default CLIP-style vision/projector settings when
        # no explicit vision_config is supplied.
        self.vision_config = DEFAULT_VISUAL_CONFIG if vision_config is None else vision_config
        super().__init__(**kwargs)
176
+
177
+
178
if __name__ == "__main__":
    # Quick manual smoke test: print the default vision-tower configuration.
    print(LlavaPhiVisionConfig())
llava-phi/llava_phi/model/language_model/llava_phi.py ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from typing import List, Optional, Tuple, Union
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+ from torch.nn import CrossEntropyLoss
7
+
8
+ from transformers import AutoConfig, AutoModelForCausalLM, \
9
+ PhiModel, PhiPreTrainedModel
10
+
11
+ from transformers.modeling_outputs import CausalLMOutputWithPast
12
+ from ..llava_arch import LlavaMetaModel, LlavaMetaForCausalLM
13
+ from transformers.utils import logging
14
+ from .configuration_llava_phi import LlavaPhiConfig
15
+
16
+ logger = logging.get_logger(__name__)
17
+
18
+
19
class LLavaPhiModel(LlavaMetaModel, PhiModel):
    """Phi transformer backbone augmented with LLaVA multimodal components
    (vision tower + projector) via the LlavaMetaModel mixin."""

    config_class = LlavaPhiConfig

    def __init__(self, config):
        super(LLavaPhiModel, self).__init__(config)
24
+
25
+
26
class LlavaPhiForCausalLM(PhiPreTrainedModel, LlavaMetaForCausalLM):
    """Phi causal language model extended with LLaVA-style image conditioning."""

    config_class = LlavaPhiConfig

    def __init__(self, config):
        # NOTE(review): super(PhiPreTrainedModel, self) skips
        # PhiPreTrainedModel in the MRO — confirm this is intentional.
        super(PhiPreTrainedModel, self).__init__(config)
        self.model = LLavaPhiModel(config)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=True)

        # Initialize weights and apply final processing
        self.post_init()

    def get_model(self):
        # Accessor used by the LlavaMetaForCausalLM mixin to reach the backbone.
        return self.model

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        images: Optional[torch.FloatTensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        """Run the multimodal LM; computes shifted CE loss when labels are given.

        `images` are folded into the embedding sequence by
        prepare_inputs_labels_for_multimodal before the backbone runs.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Splice projected image features into the token sequence and align
        # attention_mask / labels accordingly (mixin-provided).
        input_ids, attention_mask, past_key_values, inputs_embeds, labels = self.prepare_inputs_labels_for_multimodal(
            input_ids, attention_mask, past_key_values, labels, images)

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict
        )

        hidden_states = outputs[0]
        logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model/pipeline parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
    ):
        """Assemble per-step generation inputs, forwarding `images` through."""
        # With a KV cache, only the newest token needs to be fed to the model.
        if past_key_values:
            input_ids = input_ids[:, -1:]

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        model_inputs.update(
            {
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "attention_mask": attention_mask,
                "images": kwargs.get("images", None),
            }
        )
        return model_inputs
123
+
124
+
125
+ AutoConfig.register("llava_phi", LlavaPhiConfig)
126
+ AutoModelForCausalLM.register(LlavaPhiConfig, LlavaPhiForCausalLM)
llava-phi/llava_phi/model/llava_arch.py ADDED
@@ -0,0 +1,208 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2023 Haotian Liu
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ from abc import ABC, abstractmethod
17
+
18
+ import torch
19
+
20
+ from .multimodal_encoder.clip_encoder import CLIPVisionTower
21
+ from .multimodal_projector.builder import build_vision_projector
22
+ from .language_model.configuration_llava_phi import LlavaPhiConfig, LlavaPhiVisionConfig, ProjectorConfig
23
+ from llava_phi.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
24
+
25
+
26
class LlavaMetaModel:
    """Mixin that attaches the multimodal components (CLIP vision tower and
    vision->language projector) to a language-model backbone.

    NOTE(review): intended for cooperative multiple inheritance with a
    transformers model class — ``super().__init__(config)`` assumes the
    sibling base class accepts ``config``.
    """

    def __init__(self, config):
        super(LlavaMetaModel, self).__init__(config)
        # Vision encoder, configured from config.vision_config["vision_tower"].
        self.vision_tower = CLIPVisionTower(
            LlavaPhiVisionConfig(**config.vision_config["vision_tower"])
        )
        # Maps vision features into the LM embedding space.
        self.mm_projector = build_vision_projector(
            ProjectorConfig(**config.vision_config["mm_projector"])
        )

    def get_vision_tower(self):
        """Return the vision tower, unwrapping a one-element list if present."""
        vision_tower = getattr(self, 'vision_tower', None)
        if type(vision_tower) is list:
            vision_tower = vision_tower[0]
        return vision_tower
41
+
42
+
43
class LlavaMetaForCausalLM(ABC):
    """Mixin adding image support to a causal LM.

    The concrete subclass must expose (via ``get_model()``) a backbone that
    carries ``vision_tower``, ``mm_projector`` and ``embed_tokens``.
    """

    @abstractmethod
    def get_model(self):
        # Return the backbone holding the multimodal submodules.
        pass

    def get_vision_tower(self):
        return self.get_model().get_vision_tower()

    def encode_images(self, images):
        """Encode images with CLIP and project into the LM embedding space."""
        image_features = self.get_model().get_vision_tower()(images)
        image_features = self.get_model().mm_projector(image_features)
        return image_features

    def prepare_inputs_labels_for_multimodal(
        self, input_ids, attention_mask, past_key_values, labels, images
    ):
        """Splice projected image features into the token-embedding sequence.

        Every IMAGE_TOKEN_INDEX placeholder in ``input_ids`` is replaced by
        the corresponding image's patch features; the labels covering those
        positions are set to IGNORE_INDEX.  Returns
        ``(input_ids, attention_mask, past_key_values, inputs_embeds, labels)``
        where exactly one of ``input_ids``/``inputs_embeds`` is non-None.
        """
        vision_tower = self.get_vision_tower()
        if vision_tower is None or images is None or input_ids.shape[1] == 1:
            # Text-only model/batch, or a single-token decoding step with a
            # KV cache: just extend the mask over the cached length + 1.
            if past_key_values is not None and vision_tower is not None and images is not None and input_ids.shape[1] == 1:
                attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device)
            return input_ids, attention_mask, past_key_values, None, labels

        if type(images) is list or images.ndim == 5:
            # Multiple images per sample: encode all at once, then split back.
            concat_images = torch.cat([image for image in images], dim=0)
            image_features = self.encode_images(concat_images)
            split_sizes = [image.shape[0] for image in images]
            image_features = torch.split(image_features, split_sizes, dim=0)
            image_features = [x.flatten(0, 1) for x in image_features]
        else:
            image_features = self.encode_images(images)

        new_input_embeds = []
        new_labels = [] if labels is not None else None
        cur_image_idx = 0
        for batch_idx, cur_input_ids in enumerate(input_ids):
            if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0:
                # multimodal LLM, but the current sample is not multimodal
                # FIXME: this is a hacky fix, for deepspeed zero3 to work
                # (keeps image_features in the graph via a zero-length slice).
                half_len = cur_input_ids.shape[0] // 2
                cur_image_features = image_features[cur_image_idx]
                cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids[:half_len])
                cur_input_embeds_2 = self.get_model().embed_tokens(cur_input_ids[half_len:])
                cur_input_embeds = torch.cat([cur_input_embeds_1, cur_image_features[0:0], cur_input_embeds_2], dim=0)
                new_input_embeds.append(cur_input_embeds)
                if labels is not None:
                    new_labels.append(labels[batch_idx])
                cur_image_idx += 1
                continue
            image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]
            cur_new_input_embeds = []
            if labels is not None:
                cur_labels = labels[batch_idx]
                cur_new_labels = []
                assert cur_labels.shape == cur_input_ids.shape
            # Consume placeholders left-to-right, emitting (text, image, text...)
            while image_token_indices.numel() > 0:
                cur_image_features = image_features[cur_image_idx]
                image_token_start = image_token_indices[0]
                if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):
                    # Keep only the im_start/im_end embeddings trainable;
                    # detach the surrounding text embeddings.
                    cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start-1]).detach())
                    cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start-1:image_token_start]))
                    cur_new_input_embeds.append(cur_image_features)
                    cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start+1:image_token_start+2]))
                    if labels is not None:
                        cur_new_labels.append(cur_labels[:image_token_start])
                        # Image positions never contribute to the LM loss.
                        cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))
                        cur_new_labels.append(cur_labels[image_token_start:image_token_start+1])
                        cur_labels = cur_labels[image_token_start+2:]
                else:
                    cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start]))
                    cur_new_input_embeds.append(cur_image_features)
                    if labels is not None:
                        cur_new_labels.append(cur_labels[:image_token_start])
                        cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))
                        cur_labels = cur_labels[image_token_start+1:]
                cur_image_idx += 1
                if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):
                    cur_input_ids = cur_input_ids[image_token_start+2:]
                else:
                    cur_input_ids = cur_input_ids[image_token_start+1:]
                image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]
            # Trailing text after the last image placeholder.
            if cur_input_ids.numel() > 0:
                if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):
                    cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids).detach())
                else:
                    cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))
                if labels is not None:
                    cur_new_labels.append(cur_labels)
            cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds]
            cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)
            new_input_embeds.append(cur_new_input_embeds)
            if labels is not None:
                cur_new_labels = torch.cat(cur_new_labels, dim=0)
                new_labels.append(cur_new_labels)

        if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds):
            # Samples now have different lengths: right-pad embeds with zeros,
            # labels with IGNORE_INDEX, and rebuild the attention mask.
            max_len = max(x.shape[0] for x in new_input_embeds)

            new_input_embeds_align = []
            for cur_new_embed in new_input_embeds:
                cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0)
                new_input_embeds_align.append(cur_new_embed)
            new_input_embeds = torch.stack(new_input_embeds_align, dim=0)

            if labels is not None:
                new_labels_align = []
                _new_labels = new_labels
                for cur_new_label in new_labels:
                    cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0)
                    new_labels_align.append(cur_new_label)
                new_labels = torch.stack(new_labels_align, dim=0)

            if attention_mask is not None:
                new_attention_mask = []
                for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels):
                    # True over the inserted image span, False over padding.
                    new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device)
                    new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device)
                    cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0)
                    new_attention_mask.append(cur_new_attention_mask)
                attention_mask = torch.stack(new_attention_mask, dim=0)
                assert attention_mask.shape == new_labels.shape
        else:
            # Uniform lengths: plain stacking, mask extended on the left to
            # account for the inserted image tokens.
            new_input_embeds = torch.stack(new_input_embeds, dim=0)
            if labels is not None:
                new_labels = torch.stack(new_labels, dim=0)

            if attention_mask is not None:
                new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device)
                attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1)
                assert attention_mask.shape == new_input_embeds.shape[:2]

        return None, attention_mask, past_key_values, new_input_embeds, new_labels

    def initialize_vision_tokenizer(self, model_args, tokenizer):
        """Add the image special tokens to the tokenizer and resize/initialise
        the embedding tables accordingly."""
        if model_args.mm_use_im_patch_token:
            tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
            self.resize_token_embeddings(len(tokenizer))

        if model_args.mm_use_im_start_end:
            num_new_tokens = tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)
            self.resize_token_embeddings(len(tokenizer))

            if num_new_tokens > 0:
                # Initialise the new rows with the mean of existing embeddings.
                input_embeddings = self.get_input_embeddings().weight.data
                output_embeddings = self.get_output_embeddings().weight.data

                input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(
                    dim=0, keepdim=True)
                output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(
                    dim=0, keepdim=True)

                input_embeddings[-num_new_tokens:] = input_embeddings_avg
                output_embeddings[-num_new_tokens:] = output_embeddings_avg

            if model_args.tune_mm_mlp_adapter:
                # Adapter tuning: train input embeddings, freeze output head.
                for p in self.get_input_embeddings().parameters():
                    p.requires_grad = True
                for p in self.get_output_embeddings().parameters():
                    p.requires_grad = False

        elif model_args.mm_use_im_patch_token:
            if model_args.tune_mm_mlp_adapter:
                for p in self.get_input_embeddings().parameters():
                    p.requires_grad = False
                for p in self.get_output_embeddings().parameters():
                    p.requires_grad = False
llava-phi/llava_phi/model/multimodal_encoder/clip_encoder.py ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from abc import ABC
2
+
3
+ import torch
4
+ import torch.nn as nn
5
+
6
+ from transformers import CLIPPreTrainedModel, CLIPVisionConfig
7
+ from transformers.models.clip.modeling_clip import CLIPVisionTransformer
8
+ from llava_phi.model.language_model.configuration_llava_phi import LlavaPhiVisionConfig
9
+
10
+
11
class CLIPVisionTower(CLIPPreTrainedModel):
    """CLIP vision encoder wrapped as a standalone HF ``PreTrainedModel``.

    Produces patch-level hidden states selected by
    ``config.mm_vision_select_layer`` / ``config.mm_vision_select_feature``.
    """
    config_class = LlavaPhiVisionConfig

    def __init__(self, config):
        super().__init__(config)

        self.vision_model = CLIPVisionTransformer(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.vision_model.embeddings.patch_embedding

    def feature_select(self, image_forward_outs):
        """Pick the hidden-state layer and token subset used as image features."""
        image_features = image_forward_outs.hidden_states[
            self.config.mm_vision_select_layer
        ]
        if self.config.mm_vision_select_feature == "patch":
            # Drop the CLS token; keep patch tokens only.
            image_features = image_features[:, 1:]
        elif self.config.mm_vision_select_feature == "cls_patch":
            image_features = image_features
        else:
            raise ValueError(
                f"Unexpected select feature: {self.config.mm_vision_select_feature}"
            )
        return image_features

    def forward(self, images):
        """Encode a batch tensor, or a list of single images one at a time."""
        if type(images) is list:
            image_features = []
            for image in images:
                image_forward_out = self.vision_model(
                    image.to(device=self.device, dtype=self.dtype).unsqueeze(0),
                    output_hidden_states=True,
                )
                image_feature = self.feature_select(image_forward_out).to(image.dtype)
                image_features.append(image_feature)
        else:
            image_forward_outs = self.vision_model(
                images.to(device=self.device, dtype=self.dtype),
                output_hidden_states=True,
            )
            image_features = self.feature_select(image_forward_outs).to(images.dtype)

        return image_features

    @property
    def dummy_feature(self):
        # Zero feature used as a placeholder when no image is supplied.
        return torch.zeros(1, self.hidden_size, device=self.device, dtype=self.dtype)

    @property
    def dtype(self):
        # Derive dtype/device from the first parameter of the tower.
        return list(self.vision_model.parameters())[0].dtype

    @property
    def device(self):
        return list(self.vision_model.parameters())[0].device

    @property
    def hidden_size(self):
        return self.config.hidden_size

    @property
    def num_patches(self):
        # Number of image patches (excludes the CLS token).
        return (self.config.image_size // self.config.patch_size) ** 2
76
+
77
+
78
if __name__ == "__main__":
    # Ad-hoc smoke test: loads a CLIP config from a hard-coded local path
    # (developer machine only) and checks the config/model round-trip.
    clip_config = CLIPVisionConfig.from_pretrained(
        "/data/private/zhumj/GPTcode/mm-phi/openai/clip-vit-large-patch14-336"
    )
    print("################ clip_config ##############")
    print(clip_config)
    phi_vis_config = LlavaPhiVisionConfig(**clip_config.to_dict())
    print("################ phi_vis_config ##############")
    print(phi_vis_config)

    model = CLIPVisionTower(clip_config)
    # print(list(model.vision_model.parameters())[0].dtype)
llava-phi/llava_phi/model/multimodal_projector/builder.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+ import re
4
+
5
+
6
class IdentityMap(nn.Module):
    """No-op projector: returns its input unchanged.

    Extra positional/keyword arguments are accepted and ignored so the
    module stays call-compatible with the other projector variants.
    """

    def __init__(self):
        super().__init__()

    def forward(self, x, *_args, **_kwargs):
        return x

    @property
    def config(self):
        """Config dict recorded when serialising the projector choice."""
        return {"mm_projector_type": "identity"}
16
+
17
+
18
class SimpleResBlock(nn.Module):
    """Residual block: LayerNorm, then a 2-layer GELU MLP added back onto
    the normalised activations (``y = norm(x) + mlp(norm(x))``)."""

    def __init__(self, channels):
        super().__init__()
        self.pre_norm = nn.LayerNorm(channels)
        self.proj = nn.Sequential(
            nn.Linear(channels, channels),
            nn.GELU(),
            nn.Linear(channels, channels),
        )

    def forward(self, x):
        normed = self.pre_norm(x)
        return normed + self.proj(normed)
30
+
31
+
32
def build_vision_projector(config):
    """Build the vision->language projector named by ``config.mm_projector_type``.

    Supported types: ``"identity"``, ``"linear"`` (the default), and
    ``"mlp<N>x_gelu"`` (an N-layer MLP with GELU between the linears).
    Raises ``ValueError`` for anything else.
    """
    projector_type = getattr(config, "mm_projector_type", "linear")

    if projector_type == "identity":
        return IdentityMap()

    if projector_type == "linear":
        return nn.Linear(config.mm_hidden_size, config.hidden_size)

    mlp_match = re.match(r"^mlp(\d+)x_gelu$", projector_type)
    if mlp_match is not None:
        depth = int(mlp_match.group(1))
        layers = [nn.Linear(config.mm_hidden_size, config.hidden_size)]
        for _ in range(depth - 1):
            layers += [nn.GELU(), nn.Linear(config.hidden_size, config.hidden_size)]
        return nn.Sequential(*layers)

    raise ValueError(f"Unknown projector type: {projector_type}")
llava-phi/llava_phi/serve/__init__.py ADDED
File without changes
llava-phi/llava_phi/serve/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (157 Bytes). View file
 
llava-phi/llava_phi/serve/__pycache__/cli.cpython-310.pyc ADDED
Binary file (3.5 kB). View file
 
llava-phi/llava_phi/serve/app.py ADDED
@@ -0,0 +1,354 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import hashlib
3
+ import json
4
+ import os
5
+ import time
6
+ from threading import Thread
7
+
8
+ import gradio as gr
9
+ import torch
10
+ from llava_phi.constants import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN,
11
+ DEFAULT_IMAGE_TOKEN, IMAGE_TOKEN_INDEX)
12
+ from llava_phi.conversation import (SeparatorStyle, conv_templates,
13
+ default_conversation)
14
+ from llava_phi.mm_utils import (KeywordsStoppingCriteria, load_image_from_base64,
15
+ process_images, tokenizer_image_token)
16
+ from llava_phi.model.builder import load_pretrained_model
17
+ from transformers import TextIteratorStreamer
18
+
19
print(gr.__version__)

# CSS injected into the Gradio Blocks page (button sizing only).
block_css = """

#buttons button {
    min-width: min(120px,100%);
}
"""
# Static markdown fragments rendered around the chat UI.
title_markdown = ("""
# LLaVA-Phi: Efficient Multi-Modal Assistant with Small Language Model
[[Code](https://github.com/zhuyiche/llava-phi)] | 📚 [[Paper](https://arxiv.org/pdf/2401.02330)]
""")
tos_markdown = ("""
### Terms of use
By using this service, users are required to agree to the following terms:
The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes.
For an optimal experience, please use desktop computers for this demo, as mobile devices may compromise its quality.
""")
learn_more_markdown = ("""
### License
The service is a research preview intended for non-commercial use only, subject to the model [License](https://huggingface.co/microsoft/phi-2) of Phi-2. Please contact us if you find any potential violation.
""")
ack_markdown = ("""
### Acknowledgement
The template for this web demo is from [LLaVA](https://github.com/haotian-liu/LLaVA), and we are very grateful to LLaVA for their open source contributions to the community!
""")
+ """)
45
+
46
+
47
def regenerate(state, image_process_mode):
    """Clear the last assistant reply so the bot answers the turn again."""
    # Blank out the pending assistant message.
    state.messages[-1][-1] = None
    # If the preceding user turn carried an image payload
    # (text, image, mode), refresh its processing mode.
    prev_turn = state.messages[-2]
    payload = prev_turn[1]
    if type(payload) in (tuple, list):
        prev_turn[1] = (*payload[:2], image_process_mode)
    state.skip_next = False
    return (state, state.to_gradio_chatbot(), "", None)
54
+
55
+
56
def clear_history():
    """Reset the chat: fresh conversation state, empty chatbot/textbox/image."""
    state = default_conversation.copy()
    return (state, state.to_gradio_chatbot(), "", None)
59
+
60
+
61
def add_text(state, text, image, image_process_mode):
    """Append a user turn (text, optionally paired with an image) to state."""
    if len(text) <= 0 and image is None:
        # Nothing to send: flag the turn so generation is skipped.
        state.skip_next = True
        return (state, state.to_gradio_chatbot(), "", None)

    if image is None:
        payload = text[:1536]  # hard cut-off
    else:
        trimmed = text[:1536][:1200]  # tighter cut-off with an image attached
        # Guarantee exactly one <image> placeholder in the prompt.
        if '<image>' not in trimmed:
            trimmed = trimmed + '\n<image>'
        payload = (trimmed, image, image_process_mode)
        # Submitting a new image starts a brand-new conversation.
        if len(state.get_images(return_pil=True)) > 0:
            state = default_conversation.copy()

    state.append_message(state.roles[0], payload)
    state.append_message(state.roles[1], None)
    state.skip_next = False
    return (state, state.to_gradio_chatbot(), "", None)
79
+
80
+
81
def load_demo():
    """Create the initial conversation state when the page loads."""
    state = default_conversation.copy()
    return state
84
+
85
+
86
@torch.inference_mode()
def get_response(params):
    """Stream a model reply for a prompt dict.

    Generator yielding JSON-encoded bytes ``{"text": ..., "error_code": 0}``.
    Relies on module-level globals ``model``, ``tokenizer`` and
    ``image_processor`` set up in ``__main__``.
    """
    prompt = params["prompt"]
    ori_prompt = prompt
    images = params.get("images", None)
    num_image_tokens = 0
    if images is not None and len(images) > 0:
        if len(images) > 0:
            # Each base64 image must have a matching <image> placeholder.
            if len(images) != prompt.count(DEFAULT_IMAGE_TOKEN):
                raise ValueError(
                    "Number of images does not match number of <image> tokens in prompt")

            images = [load_image_from_base64(image) for image in images]
            images = process_images(images, image_processor, model.config)

            if type(images) is list:
                images = [image.to(model.device, dtype=torch.float16)
                          for image in images]
            else:
                images = images.to(model.device, dtype=torch.float16)

            replace_token = DEFAULT_IMAGE_TOKEN
            if getattr(model.config, 'mm_use_im_start_end', False):
                replace_token = DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN
            prompt = prompt.replace(DEFAULT_IMAGE_TOKEN, replace_token)

            # Used below to budget max_new_tokens against the context window.
            num_image_tokens = prompt.count(
                replace_token) * model.get_vision_tower().num_patches
        else:
            images = None
        image_args = {"images": images}
    else:
        images = None
        image_args = {}

    temperature = float(params.get("temperature", 1.0))
    top_p = float(params.get("top_p", 1.0))
    max_context_length = getattr(
        model.config, 'max_position_embeddings', 2048)
    max_new_tokens = min(int(params.get("max_new_tokens", 256)), 1024)
    stop_str = params.get("stop", None)
    # Near-zero temperature is treated as greedy decoding.
    do_sample = True if temperature > 0.001 else False

    input_ids = tokenizer_image_token(
        prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(model.device)
    keywords = [stop_str]
    stopping_criteria = KeywordsStoppingCriteria(
        keywords, tokenizer, input_ids)
    streamer = TextIteratorStreamer(
        tokenizer, skip_prompt=True, skip_special_tokens=True, timeout=15)

    # Leave room in the context for the prompt and the image patch tokens.
    max_new_tokens = min(max_new_tokens, max_context_length -
                         input_ids.shape[-1] - num_image_tokens)

    if max_new_tokens < 1:
        yield json.dumps({"text": ori_prompt + "Exceeds max token length. Please start a new conversation, thanks.",
                          "error_code": 0}).encode() + b"\0"
        return

    # local inference: generate on a worker thread, stream from this one.
    thread = Thread(target=model.generate, kwargs=dict(
        inputs=input_ids,
        do_sample=do_sample,
        temperature=temperature,
        top_p=top_p,
        max_new_tokens=max_new_tokens,
        streamer=streamer,
        stopping_criteria=[stopping_criteria],
        use_cache=True,
        **image_args
    ))
    thread.start()

    generated_text = ori_prompt
    for new_text in streamer:
        generated_text += new_text
        if generated_text.endswith(stop_str):
            generated_text = generated_text[:-len(stop_str)]
        yield json.dumps({"text": generated_text, "error_code": 0}).encode()
165
+
166
+
167
def http_bot(state, temperature, top_p, max_new_tokens):
    """Gradio callback: build the request payload and stream the model reply
    into the chatbot, yielding (state, chatbot) updates as text arrives.

    Uses module-level globals ``model_name`` and ``get_response``.
    """
    if state.skip_next:
        # This generate call is skipped due to invalid inputs
        yield (state, state.to_gradio_chatbot())
        return

    if len(state.messages) == state.offset + 2:
        # First round of conversation
        # NOTE(review): both branches pick the same template.
        if "phi" in model_name.lower():
            template_name = "phi-2_v0"
        else:
            template_name = "phi-2_v0"
        new_state = conv_templates[template_name].copy()
        new_state.append_message(new_state.roles[0], state.messages[-2][1])
        new_state.append_message(new_state.roles[1], None)
        state = new_state

    # Construct prompt
    prompt = state.get_prompt()

    all_images = state.get_images(return_pil=True)
    all_image_hash = [hashlib.md5(image.tobytes()).hexdigest()
                      for image in all_images]

    # Make requests
    pload = {
        "model": model_name,
        "prompt": prompt,
        "temperature": float(temperature),
        "top_p": float(top_p),
        "max_new_tokens": min(int(max_new_tokens), 1536),
        "stop": state.sep if state.sep_style in [SeparatorStyle.SINGLE, SeparatorStyle.MPT] else state.sep2,
        "images": f'List of {len(state.get_images())} images: {all_image_hash}',
    }

    # Replace the hash summary with the actual base64 images.
    pload['images'] = state.get_images()

    # Show a typing cursor while tokens stream in.
    state.messages[-1][-1] = "▌"
    yield (state, state.to_gradio_chatbot())

    # for stream
    output = get_response(pload)
    for chunk in output:
        if chunk:
            data = json.loads(chunk.decode())
            if data["error_code"] == 0:
                output = data["text"][len(prompt):].strip()
                state.messages[-1][-1] = output + "▌"
                yield (state, state.to_gradio_chatbot())
            else:
                output = data["text"] + \
                    f" (error_code: {data['error_code']})"
                state.messages[-1][-1] = output
                yield (state, state.to_gradio_chatbot())
                return
            # Throttle UI updates slightly.
            time.sleep(0.03)

    # Drop the trailing typing cursor.
    state.messages[-1][-1] = state.messages[-1][-1][:-1]
    yield (state, state.to_gradio_chatbot())
226
+
227
+
228
def build_demo():
    """Assemble the Gradio Blocks UI and wire its event handlers.

    Returns the (unlaunched) ``gr.Blocks`` demo object.
    """
    textbox = gr.Textbox(
        show_label=False, placeholder="Enter text and press ENTER", container=False)
    with gr.Blocks(title="LLaVA-Phi", theme=gr.themes.Default(), css=block_css) as demo:
        # Per-session conversation state.
        state = gr.State()
        gr.Markdown(title_markdown)

        with gr.Row():
            # Left column: model picker, image input, examples, parameters.
            with gr.Column(scale=5):
                with gr.Row(elem_id="Model ID"):
                    gr.Dropdown(
                        choices=['LLaVA-Phi-3B'],
                        value='LLaVA-Phi-3B',
                        interactive=True,
                        label='Model ID',
                        container=False)
                imagebox = gr.Image(type="pil")
                image_process_mode = gr.Radio(
                    ["Crop", "Resize", "Pad", "Default"],
                    value="Default",
                    label="Preprocess for non-square image", visible=False)

                cur_dir = os.path.dirname(os.path.abspath(__file__))
                gr.Examples(examples=[
                    [f"{cur_dir}/examples/extreme_ironing.jpg",
                        "What is unusual about this image?"],
                    [f"{cur_dir}/examples/waterview.jpg",
                        "What are the things I should be cautious about when I visit here?"],
                ], inputs=[imagebox, textbox])

                with gr.Accordion("Parameters", open=False) as _:
                    temperature = gr.Slider(
                        minimum=0.0, maximum=1.0, value=0.2, step=0.1, interactive=True, label="Temperature", )
                    top_p = gr.Slider(
                        minimum=0.0, maximum=1.0, value=0.7, step=0.1, interactive=True, label="Top P", )
                    max_output_tokens = gr.Slider(
                        minimum=0, maximum=1024, value=512, step=64, interactive=True, label="Max output tokens", )

            # Right column: chat window, input row and control buttons.
            with gr.Column(scale=8):
                chatbot = gr.Chatbot(
                    elem_id="chatbot", label="LLaVA-Phi Chatbot", height=550)
                with gr.Row():
                    with gr.Column(scale=8):
                        textbox.render()
                    with gr.Column(scale=1, min_width=50):
                        submit_btn = gr.Button(value="Send", variant="primary")
                with gr.Row(elem_id="buttons") as _:
                    regenerate_btn = gr.Button(
                        value="🔄 Regenerate", interactive=True)
                    clear_btn = gr.Button(value="🗑️ Clear", interactive=True)

        gr.Markdown(tos_markdown)
        gr.Markdown(learn_more_markdown)
        gr.Markdown(ack_markdown)

        # Regenerate: reset the last reply, then stream a new one.
        regenerate_btn.click(
            regenerate,
            [state, image_process_mode],
            [state, chatbot, textbox, imagebox],
            queue=False
        ).then(
            http_bot,
            [state, temperature, top_p, max_output_tokens],
            [state, chatbot]
        )

        clear_btn.click(
            clear_history,
            None,
            [state, chatbot, textbox, imagebox],
            queue=False
        )

        # ENTER in the textbox and the Send button share the same pipeline.
        textbox.submit(
            add_text,
            [state, textbox, imagebox, image_process_mode],
            [state, chatbot, textbox, imagebox],
            queue=False
        ).then(
            http_bot,
            [state, temperature, top_p, max_output_tokens],
            [state, chatbot]
        )

        submit_btn.click(
            add_text,
            [state, textbox, imagebox, image_process_mode],
            [state, chatbot, textbox, imagebox],
            queue=False
        ).then(
            http_bot,
            [state, temperature, top_p, max_output_tokens],
            [state, chatbot]
        )

        # Fresh conversation state on page load.
        demo.load(
            load_demo,
            None,
            [state],
            queue=False
        )
    return demo
330
+
331
+
332
def parse_args():
    """Parse the web demo's command-line options and return the namespace."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--host", type=str, default="0.0.0.0")
    parser.add_argument("--port", type=int, default=7860)
    # NOTE: a plain default (not action="store_true"); passing a value on the
    # command line yields a string, matching the original behaviour.
    parser.add_argument("--share", default=True)
    parser.add_argument(
        "--model-path", type=str, default="checkpoints/llavaPhi-v0-3b-finetune"
    )
    parser.add_argument("--model-name", type=str, default="llavaPhi-v0-3b")
    return parser.parse_args()
343
+
344
+
345
if __name__ == '__main__':
    args = parse_args()
    # These assignments create the module-level globals consumed by
    # get_response and http_bot.
    model_name = args.model_name
    tokenizer, model, image_processor, context_len = load_pretrained_model(
        args.model_path, None, args.model_name, False, False)
    demo = build_demo()
    demo.queue()
    demo.launch(server_name=args.host,
                server_port=args.port,
                share=args.share)
llava-phi/llava_phi/serve/cli.py ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import torch
3
+
4
+ from llava_phi.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
5
+ from llava_phi.conversation import conv_templates, SeparatorStyle
6
+ from llava_phi.model.builder import load_pretrained_model
7
+ from llava_phi.utils import disable_torch_init
8
+ from llava_phi.mm_utils import tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria
9
+
10
+ from PIL import Image
11
+
12
+ import requests
13
+ from PIL import Image
14
+ from io import BytesIO
15
+ from transformers import TextStreamer
16
+
17
+
18
def load_image(image_file):
    """Open ``image_file`` (a local path or an http(s) URL) as an RGB PIL image."""
    # URLs are fetched over the network; anything else is a filesystem path.
    if image_file.startswith('http') or image_file.startswith('https'):
        raw = BytesIO(requests.get(image_file).content)
        return Image.open(raw).convert('RGB')
    return Image.open(image_file).convert('RGB')
25
+
26
+
27
def main(args):
    """Interactive CLI chat loop: load the model, attach one image, then
    alternate user prompts and streamed model replies until empty input."""
    # Model
    disable_torch_init()

    model_name = get_model_name_from_path(args.model_path)
    tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name, args.load_8bit, args.load_4bit)

    if 'llama-2' in model_name.lower():
        conv_mode = "llava_llama_2"
    elif "v1" in model_name.lower():
        conv_mode = "llava_v1"
    elif "mpt" in model_name.lower():
        conv_mode = "mpt"
    else:
        conv_mode = "llava_v0"
    # NOTE(review): this unconditionally overrides the auto-detected mode
    # above, making the if/elif chain dead code — confirm intent.
    conv_mode="vicuna_v1"
    if args.conv_mode is not None and conv_mode != args.conv_mode:
        print('[WARNING] the auto inferred conversation mode is {}, while `--conv-mode` is {}, using {}'.format(conv_mode, args.conv_mode, args.conv_mode))
    else:
        args.conv_mode = conv_mode

    conv = conv_templates[args.conv_mode].copy()
    if "mpt" in model_name.lower():
        roles = ('user', 'assistant')
    else:
        roles = conv.roles

    # Single image for the whole session, preprocessed once up front.
    image = load_image(args.image_file)
    image_tensor = image_processor.preprocess(image, return_tensors='pt')['pixel_values'].cuda()

    while True:
        try:
            inp = input(f"{roles[0]}: ")
        except EOFError:
            inp = ""
        if not inp:
            # Empty input (or EOF) ends the session.
            print("exit...")
            break

        print(f"{roles[1]}: ", end="")

        if image is not None:
            # first message: prepend the image placeholder token(s), then
            # drop `image` so later turns are text-only.
            if model.config.mm_use_im_start_end:
                inp = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + inp
            else:
                inp = DEFAULT_IMAGE_TOKEN + '\n' + inp
            conv.append_message(conv.roles[0], inp)
            image = None
        else:
            # later messages
            conv.append_message(conv.roles[0], inp)
        conv.append_message(conv.roles[1], None)
        prompt = conv.get_prompt()

        input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda()
        stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
        keywords = [stop_str]
        stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids)
        # Streams decoded tokens to stdout as they are generated.
        streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

        with torch.inference_mode():
            output_ids = model.generate(
                input_ids,
                images=image_tensor,
                do_sample=True,
                temperature=0.2,
                max_new_tokens=1024,
                streamer=streamer,
                use_cache=True,
                eos_token_id=tokenizer.eos_token_id,  # End of sequence token
                pad_token_id=tokenizer.eos_token_id,  # Pad token
                stopping_criteria=[stopping_criteria])

        # Record the reply in the conversation for the next turn's prompt.
        outputs = tokenizer.decode(output_ids[0, input_ids.shape[1]:]).strip()
        conv.messages[-1][-1] = outputs

        if args.debug:
            print("\n", {"prompt": prompt, "outputs": outputs}, "\n")
106
+
107
+
108
if __name__ == "__main__":
    # Command-line entry point for the interactive chat client.
    parser = argparse.ArgumentParser()
    parser.add_argument("--model-path", type=str, default="facebook/opt-350m")
    parser.add_argument("--model-base", type=str, default=None)
    parser.add_argument("--image-file", type=str, required=True)
    parser.add_argument("--num-gpus", type=int, default=1)
    parser.add_argument("--conv-mode", type=str, default=None)
    # NOTE(review): --temperature and --max-new-tokens are parsed but main()
    # hard-codes 0.2 / 1024 — confirm whether they should be wired through.
    parser.add_argument("--temperature", type=float, default=0.2)
    parser.add_argument("--max-new-tokens", type=int, default=512)
    parser.add_argument("--load-8bit", action="store_true")
    parser.add_argument("--load-4bit", action="store_true")
    parser.add_argument("--debug", action="store_true")
    args = parser.parse_args()
    main(args)
llava-phi/llava_phi/serve/examples/extreme_ironing.jpg ADDED
llava-phi/llava_phi/serve/examples/waterview.jpg ADDED
llava-phi/llava_phi/train/convert_model2base_llava_phi.py ADDED
@@ -0,0 +1,767 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Adopted from https://github.com/lm-sys/FastChat. Below is the original copyright:
2
+ # Adopted from tatsu-lab@stanford_alpaca. Below is the original copyright:
3
+ # Copyright 2023 Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import os
18
+ import copy
19
+ from dataclasses import dataclass, field
20
+ import json
21
+ import logging
22
+ import pathlib
23
+ from typing import Dict, Optional, Sequence, List
24
+
25
+ import torch
26
+
27
+ import transformers
28
+
29
+ from llava_phi.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, \
30
+ DEFAULT_IM_END_TOKEN
31
+ from torch.utils.data import Dataset
32
+ from llava_phi.train.llava_phi_trainer import LLaVAPhiTrainer
33
+
34
+ from llava_phi import conversation as conversation_lib
35
+ from llava_phi.model import *
36
+ from llava_phi.mm_utils import tokenizer_image_token
37
+ from transformers import CLIPVisionConfig, CLIPImageProcessor
38
+
39
+ from PIL import Image
40
+
41
+ local_rank = None
42
+
43
+
44
def rank0_print(*args):
    """Forward *args to print() only on the rank-0 process, silencing replicas."""
    if local_rank != 0:
        return
    print(*args)
47
+
48
+
49
@dataclass
class ModelArguments:
    """Model/vision-tower related CLI arguments parsed by HfArgumentParser."""
    model_name_or_path: Optional[str] = field(default="facebook/opt-125m")
    version: Optional[str] = field(default="v0")  # conversation template key
    freeze_backbone: bool = field(default=False)  # freeze the LLM weights
    tune_mm_mlp_adapter: bool = field(default=False)  # train only the projector
    freeze_vision_tower: bool = field(default=False)
    vision_tower: Optional[str] = field(default=None)  # path/name of CLIP checkpoint
    mm_vision_select_layer: Optional[int] = field(default=-1)  # default to the last layer
    mm_vision_select_feature: Optional[str] = field(default="patch")
    pretrain_mm_mlp_adapter: Optional[str] = field(default=None)
    mm_use_im_start_end: bool = field(default=False)  # wrap image token in start/end tokens
    mm_use_im_patch_token: bool = field(default=True)
62
+
63
+
64
+
65
@dataclass
class ProjectorArguments:
    """Arguments selecting the multimodal projector architecture."""
    mm_projector_type: Optional[str] = field(default='linear')  # e.g. 'linear' or an MLP spec
68
+
69
+
70
@dataclass
class DataArguments:
    """Dataset-related CLI arguments.

    NOTE(review): train() later attaches extra attributes at runtime
    (image_processor, mm_use_im_start_end) that are read by the dataset code.
    """
    data_path: str = field(default=None,
                           metadata={"help": "Path to the training data."})
    lazy_preprocess: bool = False
    is_multimodal: bool = False
    image_folder: Optional[str] = field(default=None)
    image_aspect_ratio: str = 'square'  # 'square' or 'pad' (pad-to-square with mean color)
78
+
79
+
80
@dataclass
class TrainingArguments(transformers.TrainingArguments):
    """Extends HF TrainingArguments with quantization, LoRA and projector options."""
    cache_dir: Optional[str] = field(default=None)
    optim: str = field(default="adamw_torch")
    # Non-standard Adam hyperparameters (HF defaults are 0.999 / 1e-8).
    adam_beta1: float = field(default=0.9)
    adam_beta2: float = field(default=0.98)
    adam_epsilon: float = field(default=1e-7)
    remove_unused_columns: bool = field(default=False)

    # freeze_mm_mlp_adapter: bool = field(default=False)
    model_max_length: int = field(
        default=512,
        metadata={
            "help":
            "Maximum sequence length. Sequences will be right padded (and possibly truncated)."
        },
    )
    # bitsandbytes quantization settings (used when bits in [4, 8]).
    double_quant: bool = field(
        default=True,
        metadata={"help": "Compress the quantization statistics through double quantization."}
    )
    quant_type: str = field(
        default="nf4",
        metadata={"help": "Quantization data type to use. Should be one of `fp4` or `nf4`."}
    )
    bits: int = field(
        default=16,
        metadata={"help": "How many bits to use."}
    )
    # LoRA configuration (consumed only when lora_enable is True).
    lora_enable: bool = False
    lora_r: int = 64
    lora_alpha: int = 16
    lora_dropout: float = 0.05
    lora_weight_path: str = ""
    lora_bias: str = "none"
    mm_projector_lr: Optional[float] = None  # separate LR for the projector, if set
    group_by_modality_length: bool = field(default=False)
117
+
118
+
119
def maybe_zero_3(param, ignore_status=False, name=None):
    """Return a CPU copy of `param`, gathering it first if DeepSpeed ZeRO-3 sharded it.

    Parameters with a `ds_id` attribute are ZeRO-partitioned; they are gathered
    via `zero.GatheredParameters` before being detached and cloned to CPU.
    """
    from deepspeed import zero
    from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
    if hasattr(param, "ds_id"):
        if param.ds_status == ZeroParamStatus.NOT_AVAILABLE:
            if not ignore_status:
                # NOTE(review): the message text says "!=" but this branch fires
                # when the status *is* NOT_AVAILABLE — wording looks inverted.
                logging.warning(f"{name}: param.ds_status != ZeroParamStatus.NOT_AVAILABLE: {param.ds_status}")
        with zero.GatheredParameters([param]):
            param = param.data.detach().cpu().clone()
    else:
        # Plain (non-sharded) parameter: just detach and copy to CPU.
        param = param.detach().cpu().clone()
    return param
131
+
132
+
133
# Borrowed from peft.utils.get_peft_model_state_dict
def get_peft_state_maybe_zero_3(named_params, bias):
    """Collect the LoRA state dict (optionally with bias terms), ZeRO-3 safe.

    Args:
        named_params: iterable of (name, parameter) pairs, e.g.
            `model.named_parameters()`.
        bias: which bias terms to include — "none" (LoRA weights only),
            "all" (every bias), or "lora_only" (only biases belonging to
            modules that also have LoRA weights).

    Returns:
        dict mapping parameter name -> CPU tensor (gathered via maybe_zero_3).

    Raises:
        NotImplementedError: for an unrecognized `bias` value.
    """
    if bias == "none":
        to_return = {k: t for k, t in named_params if "lora_" in k}
    elif bias == "all":
        to_return = {k: t for k, t in named_params if "lora_" in k or "bias" in k}
    elif bias == "lora_only":
        to_return = {}
        maybe_lora_bias = {}
        lora_bias_names = set()
        for k, t in named_params:
            if "lora_" in k:
                to_return[k] = t
                # Derive the sibling bias name of this LoRA module.
                bias_name = k.split("lora_")[0] + "bias"
                lora_bias_names.add(bias_name)
            elif "bias" in k:
                maybe_lora_bias[k] = t
        # BUGFIX: the original iterated dict *keys* (`for k, t in maybe_lora_bias:`,
        # a ValueError on unpacking) and tested a stale `bias_name` variable.
        # Keep only biases whose module also carries LoRA weights, as in
        # peft.utils.get_peft_model_state_dict.
        for k, t in maybe_lora_bias.items():
            if k in lora_bias_names:
                to_return[k] = t
    else:
        raise NotImplementedError
    to_return = {k: maybe_zero_3(v, ignore_status=True) for k, v in to_return.items()}
    return to_return
157
+
158
+
159
def get_peft_state_non_lora_maybe_zero_3(named_params, require_grad_only=True):
    """Collect every non-LoRA parameter (optionally only trainable ones) on CPU."""
    selected = {name: param for name, param in named_params if "lora_" not in name}
    if require_grad_only:
        selected = {name: param for name, param in selected.items() if param.requires_grad}
    return {name: maybe_zero_3(param, ignore_status=True).cpu()
            for name, param in selected.items()}
165
+
166
+
167
def get_mm_adapter_state_maybe_zero_3(named_params, keys_to_match):
    """Collect parameters whose name contains any of `keys_to_match`, on CPU."""
    matched = {}
    for name, param in named_params:
        if any(key in name for key in keys_to_match):
            matched[name] = param
    return {name: maybe_zero_3(param, ignore_status=True).cpu()
            for name, param in matched.items()}
171
+
172
+
173
def find_all_linear_names(model):
    """Return the (leaf) names of all nn.Linear modules eligible for LoRA.

    Multimodal components (projector, vision tower, resampler) and the
    lm_head are excluded.
    """
    multimodal_keywords = ('mm_projector', 'vision_tower', 'vision_resampler')
    linear_names = set()
    for module_name, module in model.named_modules():
        if any(keyword in module_name for keyword in multimodal_keywords):
            continue
        if not isinstance(module, torch.nn.Linear):
            continue
        parts = module_name.split('.')
        # Keep only the last path component (or the whole name if un-nested).
        linear_names.add(parts[0] if len(parts) == 1 else parts[-1])

    linear_names.discard('lm_head')  # needed for 16-bit
    return list(linear_names)
187
+
188
+
189
def safe_save_model_for_hf_trainer(trainer: transformers.Trainer,
                                   output_dir: str):
    """Collects the state dict and dump to disk.

    Under DeepSpeed, defers to trainer.save_model (which handles ZeRO
    gathering); otherwise copies the full state dict to CPU before saving
    to avoid holding two GPU copies.
    """

    # if getattr(trainer.args, "tune_mm_mlp_adapter", False):
    #     # Only save Adapter
    #     keys_to_match = ['mm_projector']
    #     if getattr(trainer.args, "use_im_start_end", False):
    #         keys_to_match.extend(['embed_tokens', 'embed_in'])
    #
    #     weight_to_save = get_mm_adapter_state_maybe_zero_3(trainer.model.named_parameters(), keys_to_match)
    #     trainer.model.config.save_pretrained(output_dir)
    #
    #     current_folder = output_dir.split('/')[-1]
    #     parent_folder = os.path.dirname(output_dir)
    #     if trainer.args.local_rank == 0 or trainer.args.local_rank == -1:
    #         if current_folder.startswith('checkpoint-'):
    #             mm_projector_folder = os.path.join(parent_folder, "mm_projector")
    #             os.makedirs(mm_projector_folder, exist_ok=True)
    #             torch.save(weight_to_save, os.path.join(mm_projector_folder, f'{current_folder}.bin'))
    #         else:
    #             torch.save(weight_to_save, os.path.join(output_dir, f'mm_projector.bin'))
    #
    #     if getattr(trainer.args, "freeze_vision_model", False):
    #         pass
    #     return

    if trainer.deepspeed:
        # Let DeepSpeed-aware save_model handle sharded parameters.
        torch.cuda.synchronize()
        trainer.save_model(output_dir)
        return

    state_dict = trainer.model.state_dict()
    if trainer.args.should_save:
        # Move everything to CPU first, then free the original references.
        cpu_state_dict = {
            key: value.cpu()
            for key, value in state_dict.items()
        }
        del state_dict
        trainer._save(output_dir, state_dict=cpu_state_dict)  # noqa
229
+
230
+
231
def smart_tokenizer_and_embedding_resize(
        special_tokens_dict: Dict,
        tokenizer: transformers.PreTrainedTokenizer,
        model: transformers.PreTrainedModel,
):
    """Resize tokenizer and embedding.

    Adds `special_tokens_dict` to the tokenizer, resizes the model's
    embedding matrices, and initializes the new rows with the mean of the
    pre-existing embeddings (a better starting point than random init).

    Note: This is the unoptimized version that may make your embedding size not be divisible by 64.
    """
    num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict)
    model.resize_token_embeddings(len(tokenizer))

    if num_new_tokens > 0:
        input_embeddings = model.get_input_embeddings().weight.data
        output_embeddings = model.get_output_embeddings().weight.data

        # Mean of the original (pre-resize) embedding rows.
        input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(
            dim=0, keepdim=True)
        output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(
            dim=0, keepdim=True)

        # Initialize all newly-added rows with that mean.
        input_embeddings[-num_new_tokens:] = input_embeddings_avg
        output_embeddings[-num_new_tokens:] = output_embeddings_avg
254
+
255
+
256
def _tokenize_fn(strings: Sequence[str],
                 tokenizer: transformers.PreTrainedTokenizer) -> Dict:
    """Tokenize each string independently and record the unpadded lengths.

    Returns a dict whose `input_ids` and `labels` are the *same* list of 1-D
    tensors, with matching `input_ids_lens` / `labels_lens` counts of
    non-pad tokens.
    """
    ids = []
    lens = []
    for text in strings:
        encoded = tokenizer(
            text,
            return_tensors="pt",
            padding="longest",
            max_length=tokenizer.model_max_length,
            truncation=True,
        )
        ids.append(encoded.input_ids[0])
        lens.append(encoded.input_ids.ne(tokenizer.pad_token_id).sum().item())
    # labels intentionally aliases input_ids, mirroring the original contract.
    return dict(
        input_ids=ids,
        labels=ids,
        input_ids_lens=lens,
        labels_lens=lens,
    )
281
+
282
+
283
def _mask_targets(target, tokenized_lens, speakers):
    """Mask the header and every human turn in `target` with IGNORE_INDEX.

    `tokenized_lens[0]` is the header length; subsequent entries pair up
    with `speakers`. The `+ 2` skips the role-prefix tokens of a human turn.
    """
    header_len = tokenized_lens[0]
    target[:header_len] = IGNORE_INDEX
    offset = header_len
    for seg_len, speaker in zip(tokenized_lens[1:], speakers):
        if speaker == "human":
            target[offset + 2:offset + seg_len] = IGNORE_INDEX
        offset += seg_len
292
+
293
+
294
def _add_speaker_and_signal(header, source, get_conversation=True):
    """Add speaker and start/end signal on each round.

    Mutates each sentence's "value" in place to '### <role>: <text>\n' and
    returns header + all rounds + a trailing '### ' marker.
    """
    BEGIN_SIGNAL = "### "
    END_SIGNAL = "\n"
    conversation = header
    for sentence in source:
        sender = sentence["from"].lower()
        if sender == "human":
            role = conversation_lib.default_conversation.roles[0]
        elif sender == "gpt":
            role = conversation_lib.default_conversation.roles[1]
        else:
            role = 'unknown'
        sentence["value"] = (BEGIN_SIGNAL + role + ": " +
                             sentence["value"] + END_SIGNAL)
        if get_conversation:
            conversation += sentence["value"]
    conversation += BEGIN_SIGNAL
    return conversation
313
+
314
+
315
def preprocess_multimodal(
        sources: Sequence[str],
        data_args: DataArguments
) -> Dict:
    """Normalize image-token placement in every sentence (in place).

    Moves DEFAULT_IMAGE_TOKEN to the front of the sentence on its own line,
    optionally wraps it in <Image> tags (mmtag templates) or im_start/im_end
    tokens. Returns `sources` unchanged if the run is not multimodal.

    NOTE(review): reads data_args.mm_use_im_start_end, which is not declared
    on DataArguments — it is attached at runtime in train(); verify ordering.
    """
    is_multimodal = data_args.is_multimodal
    if not is_multimodal:
        return sources

    for source in sources:
        for sentence in source:
            if DEFAULT_IMAGE_TOKEN in sentence['value']:
                # Strip the token wherever it was, then re-prepend it.
                sentence['value'] = sentence['value'].replace(DEFAULT_IMAGE_TOKEN, '').strip()
                sentence['value'] = DEFAULT_IMAGE_TOKEN + '\n' + sentence['value']
                sentence['value'] = sentence['value'].strip()
                if "mmtag" in conversation_lib.default_conversation.version:
                    sentence['value'] = sentence['value'].replace(DEFAULT_IMAGE_TOKEN,
                                                                  '<Image>' + DEFAULT_IMAGE_TOKEN + '</Image>')
            replace_token = DEFAULT_IMAGE_TOKEN
            if data_args.mm_use_im_start_end:
                replace_token = DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN
            sentence["value"] = sentence["value"].replace(DEFAULT_IMAGE_TOKEN, replace_token)

    return sources
338
+
339
+
340
def preprocess_v0(
        sources,
        tokenizer: transformers.PreTrainedTokenizer,
        has_image: bool = False
) -> Dict:
    """Build input_ids/labels for the v0 (phi-2, SeparatorStyle.TWO) template.

    Renders each conversation through the default template, tokenizes it
    (image-aware if has_image), then masks everything except the assistant
    responses with IGNORE_INDEX. On a tokenization-length mismatch the whole
    sample's labels are ignored.
    """
    conv = conversation_lib.default_conversation.copy()
    roles = {"human": conv.roles[0], "gpt": conv.roles[1]}

    # Apply prompt templates
    conversations = []
    for i, source in enumerate(sources):
        if roles[source[0]["from"]] != conv.roles[0]:
            # Skip the first one if it is not from human
            source = source[1:]

        conv.messages = []
        for j, sentence in enumerate(source):
            role = roles[sentence["from"]]
            # Turns must strictly alternate human/assistant.
            assert role == conv.roles[j % 2], f"{i}"
            conv.append_message(role, sentence["value"])
        conversations.append(conv.get_prompt())

    # Tokenize conversations
    if has_image:
        input_ids = torch.stack(
            [tokenizer_image_token(prompt, tokenizer, return_tensors='pt') for prompt in conversations], dim=0)
    else:
        input_ids = tokenizer(
            conversations,
            return_tensors="pt",
            padding="longest",
            max_length=tokenizer.model_max_length,
            truncation=True,
        ).input_ids

    targets = input_ids.clone()

    assert conv.sep_style == conversation_lib.SeparatorStyle.TWO

    # Mask targets: everything before each assistant reply becomes IGNORE_INDEX.
    sep = conv.sep + conv.roles[1] + ": "
    for conversation, target in zip(conversations, targets):
        # sep2 occurrences are counted because pad==eos collapses them in the
        # ne(pad) count below.
        total_len = int(target.ne(tokenizer.pad_token_id).sum()) + conversation.count(
            conv.sep2)  # in phi-2, pad_token_id == eos_token_id

        rounds = conversation.split(conv.sep2)
        cur_len = 0
        # NOTE(review): dead code — cur_len is always 0 here, so this guard
        # never fires (it masks a BOS token in other LLaVA variants).
        if cur_len > 0:
            target[:cur_len] = IGNORE_INDEX
        for i, rou in enumerate(rounds):
            if rou == "":
                break

            parts = rou.split(sep)
            if len(parts) != 2:
                break
            parts[0] += sep

            if has_image:
                round_len = len(tokenizer_image_token(rou, tokenizer)) + 1  # +1 for <|endoftext|>
                instruction_len = len(tokenizer_image_token(parts[0], tokenizer))
            else:
                round_len = len(tokenizer(rou).input_ids) + 1  # +1 for <|endoftext|>
                instruction_len = len(tokenizer(parts[0]).input_ids)

            # Mask the instruction (user) part of this round.
            target[cur_len: cur_len + instruction_len] = IGNORE_INDEX

            cur_len += round_len
        # Mask any tail (padding / truncated remainder).
        target[cur_len:] = IGNORE_INDEX

        if cur_len < tokenizer.model_max_length:
            if cur_len != total_len:
                # Length bookkeeping went wrong: drop the whole sample's labels.
                target[:] = IGNORE_INDEX
                print(conversation)
                print(
                    f"WARNING: tokenization mismatch: {cur_len} vs. {total_len}."
                    f" (ignored)"
                )

    return dict(
        input_ids=input_ids,
        labels=targets,
    )
423
+
424
+
425
def preprocess_plain(
        sources: Sequence[str],
        tokenizer: transformers.PreTrainedTokenizer,
) -> Dict:
    """Preprocess for the PLAIN (pretraining) template.

    Each source must be exactly one (image prompt, caption) pair; the prompt
    is collapsed to the bare image token, and only the caption contributes
    to the labels.
    """
    # add end signal and concatenate together
    conversations = []
    for source in sources:
        assert len(source) == 2
        assert DEFAULT_IMAGE_TOKEN in source[0]['value']
        # Discard any prompt text: keep only the image token itself.
        source[0]['value'] = DEFAULT_IMAGE_TOKEN
        conversation = source[0]['value'] + source[1]['value'] + conversation_lib.default_conversation.sep
        conversations.append(conversation)
    # tokenize conversations
    input_ids = [tokenizer_image_token(prompt, tokenizer, return_tensors='pt') for prompt in conversations]
    targets = copy.deepcopy(input_ids)
    for target, source in zip(targets, sources):
        # Mask the image-token prefix so loss covers only the caption + sep.
        tokenized_len = len(tokenizer_image_token(source[0]['value'], tokenizer))
        target[:tokenized_len] = IGNORE_INDEX
    return dict(input_ids=input_ids, labels=targets)
447
+
448
+
449
def preprocess(
        sources: Sequence[str],
        tokenizer: transformers.PreTrainedTokenizer,
        has_image: bool = False
) -> Dict:
    """
    Given a list of sources, each is a conversation list. This transform:
    1. Add signal '### ' at the beginning each sentence, with end signal '\n';
    2. Concatenate conversations together;
    3. Tokenize the concatenated conversation;
    4. Make a deepcopy as the target. Mask human words with IGNORE_INDEX.

    Dispatches to preprocess_plain (PLAIN style) or preprocess_v0 ("v0*"
    versions); the generic path below handles any other template.
    """
    if conversation_lib.default_conversation.sep_style == conversation_lib.SeparatorStyle.PLAIN:
        return preprocess_plain(sources, tokenizer)
    if conversation_lib.default_conversation.version.startswith("v0"):
        return preprocess_v0(sources, tokenizer, has_image=has_image)
    # add end signal and concatenate together
    conversations = []
    for source in sources:
        header = f"{conversation_lib.default_conversation.system}\n\n"
        conversation = _add_speaker_and_signal(header, source)
        conversations.append(conversation)

    # tokenize conversations
    def get_tokenize_len(prompts):
        # Image-aware token counts for a list of prompt strings.
        return [len(tokenizer_image_token(prompt, tokenizer)) for prompt in prompts]

    if has_image:
        input_ids = [tokenizer_image_token(prompt, tokenizer, return_tensors='pt') for prompt in conversations]
    else:
        conversations_tokenized = _tokenize_fn(conversations, tokenizer)
        input_ids = conversations_tokenized["input_ids"]

    targets = copy.deepcopy(input_ids)
    for target, source in zip(targets, sources):
        # NOTE(review): `header` here is the value left over from the last loop
        # iteration above — correct only because every source shares the same
        # system header.
        if has_image:
            tokenized_lens = get_tokenize_len([header] + [s["value"] for s in source])
        else:
            tokenized_lens = _tokenize_fn([header] + [s["value"] for s in source], tokenizer)["input_ids_lens"]
        speakers = [sentence["from"] for sentence in source]
        _mask_targets(target, tokenized_lens, speakers)

    return dict(input_ids=input_ids, labels=targets)
492
+
493
+
494
class LazySupervisedDataset(Dataset):
    """Dataset for supervised fine-tuning.

    Loads the JSON annotation list eagerly but tokenizes/processes each
    sample lazily in __getitem__.
    """

    def __init__(self, data_path: str,
                 tokenizer: transformers.PreTrainedTokenizer,
                 data_args: DataArguments):
        super(LazySupervisedDataset, self).__init__()
        # List of sample dicts, each with "conversations" and optionally "image".
        list_data_dict = json.load(open(data_path, "r"))

        rank0_print("Formatting inputs...Skip in lazy mode")
        self.tokenizer = tokenizer
        self.list_data_dict = list_data_dict
        self.data_args = data_args

    def __len__(self):
        return len(self.list_data_dict)

    @property
    def lengths(self):
        # Approximate per-sample token lengths (whitespace words + a flat
        # 128-token budget for an image), used by length-grouped samplers.
        length_list = []
        for sample in self.list_data_dict:
            img_tokens = 128 if 'image' in sample else 0
            length_list.append(sum(len(conv['value'].split()) for conv in sample['conversations']) + img_tokens)
        return length_list

    @property
    def modality_lengths(self):
        # Like `lengths`, but text-only samples are negated so modality-aware
        # samplers can separate the two groups by sign.
        length_list = []
        for sample in self.list_data_dict:
            cur_len = sum(len(conv['value'].split()) for conv in sample['conversations'])
            cur_len = cur_len if 'image' in sample else -cur_len
            length_list.append(cur_len)
        return length_list

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        """Return dict(input_ids, labels[, image]) for sample i."""
        sources = self.list_data_dict[i]
        if isinstance(i, int):
            sources = [sources]
        assert len(sources) == 1, "Don't know why it is wrapped to a list"  # FIXME
        if 'image' in sources[0]:
            image_file = self.list_data_dict[i]['image']
            image_folder = self.data_args.image_folder
            # image_processor is attached to data_args at runtime in train().
            processor = self.data_args.image_processor
            image = Image.open(os.path.join(image_folder, image_file)).convert('RGB')
            if self.data_args.image_aspect_ratio == 'pad':
                def expand2square(pil_img, background_color):
                    # Pad the shorter side with the processor's mean color so
                    # the image becomes square without distortion.
                    width, height = pil_img.size
                    if width == height:
                        return pil_img
                    elif width > height:
                        result = Image.new(pil_img.mode, (width, width), background_color)
                        result.paste(pil_img, (0, (width - height) // 2))
                        return result
                    else:
                        result = Image.new(pil_img.mode, (height, height), background_color)
                        result.paste(pil_img, ((height - width) // 2, 0))
                        return result

                image = expand2square(image, tuple(int(x * 255) for x in processor.image_mean))
                image = processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
            else:
                image = processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
            sources = preprocess_multimodal(
                copy.deepcopy([e["conversations"] for e in sources]),
                self.data_args)
        else:
            sources = copy.deepcopy([e["conversations"] for e in sources])
        data_dict = preprocess(
            sources,
            self.tokenizer,
            has_image=('image' in self.list_data_dict[i]))
        if isinstance(i, int):
            # Unwrap the single-sample batch produced by preprocess().
            data_dict = dict(input_ids=data_dict["input_ids"][0],
                             labels=data_dict["labels"][0])

        # image exist in the data
        if 'image' in self.list_data_dict[i]:
            data_dict['image'] = image
        elif self.data_args.is_multimodal:
            # image does not exist in the data, but the model is multimodal
            crop_size = self.data_args.image_processor.crop_size
            data_dict['image'] = torch.zeros(3, crop_size['height'], crop_size['width'])
        return data_dict
577
+
578
+
579
@dataclass
class DataCollatorForSupervisedDataset(object):
    """Collate examples for supervised fine-tuning.

    Pads input_ids/labels to the batch max length, truncates to
    model_max_length, and stacks images when all shapes agree.
    """

    tokenizer: transformers.PreTrainedTokenizer

    def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]:
        input_ids, labels = tuple([instance[key] for instance in instances]
                                  for key in ("input_ids", "labels"))
        # Sentinel pad id used only to build the attention mask below;
        # presumably chosen to be outside phi-2's vocab — TODO confirm.
        temp_pad_token_id = 51000
        input_ids = torch.nn.utils.rnn.pad_sequence(
            input_ids,
            batch_first=True,
            padding_value=temp_pad_token_id)
        labels = torch.nn.utils.rnn.pad_sequence(labels,
                                                 batch_first=True,
                                                 padding_value=IGNORE_INDEX)
        input_ids = input_ids[:, :self.tokenizer.model_max_length]
        labels = labels[:, :self.tokenizer.model_max_length]
        batch = dict(
            input_ids=input_ids,
            labels=labels,
            # Real tokens are wherever the sentinel pad is absent.
            attention_mask=input_ids.ne(temp_pad_token_id),
        )

        if 'image' in instances[0]:
            images = [instance['image'] for instance in instances]
            if all(x is not None and x.shape == images[0].shape for x in images):
                batch['images'] = torch.stack(images)
            else:
                # Mixed shapes: hand the model a plain list instead of a tensor.
                batch['images'] = images

        return batch
612
+
613
+
614
def make_supervised_data_module(tokenizer: transformers.PreTrainedTokenizer,
                                data_args) -> Dict:
    """Make dataset and collator for supervised fine-tuning."""
    dataset = LazySupervisedDataset(
        tokenizer=tokenizer,
        data_path=data_args.data_path,
        data_args=data_args,
    )
    collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer)
    return {
        "train_dataset": dataset,
        "eval_dataset": None,
        "data_collator": collator,
    }
624
+
625
+
626
def train():
    """Assemble a LLaVA-Phi multimodal model and save it as a base checkpoint.

    Parses CLI args, grafts a CLIP vision tower + projector onto a phi LLM,
    configures trainability flags, then saves the combined model.
    NOTE(review): trainer.train() is never invoked — this script only
    integrates and saves the MLLM (see the "integrate the MLLM" comment).
    """
    global local_rank

    parser = transformers.HfArgumentParser(
        (ModelArguments, DataArguments, TrainingArguments, ProjectorArguments))
    model_args, data_args, training_args, projector_args = parser.parse_args_into_dataclasses()
    local_rank = training_args.local_rank
    compute_dtype = (torch.float16 if training_args.fp16 else (torch.bfloat16 if training_args.bf16 else torch.float32))

    # Optional bitsandbytes 4/8-bit quantized loading.
    bnb_model_from_pretrained_args = {}
    if training_args.bits in [4, 8]:
        from transformers import BitsAndBytesConfig
        bnb_model_from_pretrained_args.update(dict(
            device_map={"": training_args.device},
            load_in_4bit=training_args.bits == 4,
            load_in_8bit=training_args.bits == 8,
            quantization_config=BitsAndBytesConfig(
                load_in_4bit=training_args.bits == 4,
                load_in_8bit=training_args.bits == 8,
                llm_int8_skip_modules=["mm_projector"],
                llm_int8_threshold=6.0,
                llm_int8_has_fp16_weight=False,
                bnb_4bit_compute_dtype=compute_dtype,
                bnb_4bit_use_double_quant=training_args.double_quant,
                bnb_4bit_quant_type=training_args.quant_type  # {'fp4', 'nf4'}
            )
        ))

    if model_args.vision_tower is not None:
        # Build a combined LlavaPhi config: LLM config + CLIP vision config
        # + projector settings, then load the multimodal model.
        config = LlavaPhiConfig.from_pretrained(model_args.model_name_or_path, trust_remote_code=True)
        clip_config = CLIPVisionConfig.from_pretrained(model_args.vision_tower)
        vis_config = LlavaPhiVisionConfig(**clip_config.to_dict())
        config.vision_config["vision_tower"] = vis_config.to_dict()
        config.vision_config["vision_tower"]["mm_vision_select_feature"] = model_args.mm_vision_select_feature
        config.vision_config["vision_tower"]["mm_vision_select_layer"] = model_args.mm_vision_select_layer

        config.vision_config["mm_projector"]["mm_projector_type"] = projector_args.mm_projector_type
        config.vision_config["mm_projector"]["mm_hidden_size"] = vis_config.hidden_size
        config.vision_config["mm_projector"]["hidden_size"] = config.hidden_size

        model = LlavaPhiForCausalLM.from_pretrained(
            model_args.model_name_or_path,
            config=config,
            cache_dir=training_args.cache_dir,
            trust_remote_code=True,
            **bnb_model_from_pretrained_args
        )
        rank0_print(model)
        # Copy the CLIP weights into the freshly-attached vision tower.
        clip_model_param = torch.load(os.path.join(model_args.vision_tower, "pytorch_model.bin"), map_location='cpu')
        model.get_model().vision_tower.load_state_dict(clip_model_param, strict=False)
    else:
        # Text-only fallback (blocked by the assert below).
        model = transformers.PhiForCausalLM.from_pretrained(
            model_args.model_name_or_path,
            cache_dir=training_args.cache_dir,
            **bnb_model_from_pretrained_args
        )
    model.config.use_cache = False

    if model_args.freeze_backbone:
        model.model.requires_grad_(False)

    if training_args.gradient_checkpointing:
        if hasattr(model, "enable_input_require_grads"):
            model.enable_input_require_grads()
        else:
            # Older transformers: hook the embeddings so checkpointed
            # activations still receive gradients.
            def make_inputs_require_grad(module, input, output):
                output.requires_grad_(True)

            model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)

    if 'phi' in model_args.model_name_or_path:
        tokenizer = transformers.AutoTokenizer.from_pretrained(
            model_args.model_name_or_path,
            cache_dir=training_args.cache_dir,
            model_max_length=training_args.model_max_length,
            padding_side="right"
        )
    else:
        tokenizer = transformers.AutoTokenizer.from_pretrained(
            model_args.model_name_or_path,
            cache_dir=training_args.cache_dir,
            model_max_length=training_args.model_max_length,
            padding_side="right",
            use_fast=False,
        )

    tokenizer.pad_token = tokenizer.unk_token
    # Select the conversation template; default to the phi-2 v0 template.
    if model_args.version in conversation_lib.conv_templates:
        conversation_lib.default_conversation = conversation_lib.conv_templates[model_args.version]
    else:
        conversation_lib.default_conversation = conversation_lib.conv_templates["phi-2_v0"]

    assert model_args.vision_tower is not None, "llava_phi-phi only supports multi-modal models"
    if model_args.vision_tower is not None:

        vision_tower = model.get_vision_tower()
        vision_tower.to(dtype=torch.bfloat16 if training_args.bf16 else torch.float16, device=training_args.device)

        data_args.image_processor = CLIPImageProcessor.from_pretrained(model_args.vision_tower)
        data_args.is_multimodal = True

        model.config.image_aspect_ratio = data_args.image_aspect_ratio
        model.config.tokenizer_padding_side = tokenizer.padding_side
        model.config.tokenizer_model_max_length = tokenizer.model_max_length

        # Freeze everything, then selectively re-enable the projector and/or
        # vision tower according to the CLI flags.
        model.config.tune_mm_mlp_adapter = training_args.tune_mm_mlp_adapter = model_args.tune_mm_mlp_adapter
        model.requires_grad_(False)
        if model_args.tune_mm_mlp_adapter:
            for p in model.get_model().mm_projector.parameters():
                p.requires_grad = True

        model.config.freeze_vision_tower = training_args.freeze_vision_tower = model_args.freeze_vision_tower
        if not model_args.freeze_vision_tower:
            for p in model.get_model().vision_tower.parameters():
                p.requires_grad = True

        if training_args.bits in [4, 8]:
            model.get_model().mm_projector.to(dtype=compute_dtype, device=training_args.device)

        model.config.mm_use_im_start_end = data_args.mm_use_im_start_end = model_args.mm_use_im_start_end
        model.config.mm_projector_lr = training_args.mm_projector_lr
        training_args.use_im_start_end = model_args.mm_use_im_start_end
        model.config.mm_use_im_patch_token = model_args.mm_use_im_patch_token
        model.initialize_vision_tokenizer(model_args, tokenizer=tokenizer)

    data_module = make_supervised_data_module(tokenizer=tokenizer,
                                              data_args=data_args)

    trainer = LLaVAPhiTrainer(model=model,
                              tokenizer=tokenizer,
                              args=training_args,
                              **data_module)
    # integrate the MLLM
    trainer.save_state()

    model.config.use_cache = True

    safe_save_model_for_hf_trainer(trainer=trainer, output_dir=training_args.output_dir)


if __name__ == "__main__":
    train()
llava-phi/llava_phi/train/llava_phi_trainer.py ADDED
@@ -0,0 +1,156 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import torch
3
+
4
+ from torch.utils.data import Sampler
5
+
6
+ from transformers import Trainer
7
+ from transformers.trainer import (
8
+ has_length,
9
+ )
10
+ from typing import List, Optional
11
+
12
+
13
def maybe_zero_3(param, ignore_status=False, name=None):
    """Return a detached CPU clone of *param*, gathering it first when it is a
    DeepSpeed ZeRO-3 partitioned parameter.

    Args:
        param: tensor/parameter; ZeRO-3 partitioned params carry a ``ds_id``.
        ignore_status: suppress the message printed when the parameter shards
            are not available on this rank.
        name: parameter name, used only in that message.
    """
    if hasattr(param, "ds_id"):
        # Import lazily and only on the ZeRO path so this helper also works
        # for ordinary tensors in environments without deepspeed installed
        # (the original imported unconditionally and crashed in that case).
        from deepspeed import zero
        from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
        if param.ds_status == ZeroParamStatus.NOT_AVAILABLE:
            if not ignore_status:
                print(name, 'no ignore status')
        # Gather the full (unpartitioned) parameter before copying it out.
        with zero.GatheredParameters([param]):
            param = param.data.detach().cpu().clone()
    else:
        param = param.detach().cpu().clone()
    return param
25
+
26
+
27
def get_mm_adapter_state_maybe_zero_3(named_params, keys_to_match):
    """Select the named parameters whose name contains any substring from
    *keys_to_match* and return them as a dict of gathered CPU tensors."""
    selected = {}
    for param_name, tensor in named_params:
        if any(key in param_name for key in keys_to_match):
            selected[param_name] = tensor
    return {
        param_name: maybe_zero_3(tensor, ignore_status=True, name=param_name).cpu()
        for param_name, tensor in selected.items()
    }
31
+
32
+
33
def split_to_even_chunks(indices, lengths, num_chunks):
    """
    Split *indices* into *num_chunks* chunks whose summed *lengths* are
    roughly balanced.

    When the indices do not divide evenly across the chunks, fall back to a
    simple round-robin striding and skip the balancing.
    """
    if len(indices) % num_chunks != 0:
        return [indices[offset::num_chunks] for offset in range(num_chunks)]

    per_chunk = len(indices) // num_chunks
    chunks = [[] for _ in range(num_chunks)]
    loads = [0] * num_chunks

    for idx in indices:
        # Greedily place each index on the currently lightest chunk
        # (first one wins on ties, matching list.index semantics).
        target = loads.index(min(loads))
        chunks[target].append(idx)
        loads[target] += lengths[idx]
        # A full chunk is taken out of contention.
        if len(chunks[target]) == per_chunk:
            loads[target] = float("inf")

    return chunks
53
+
54
+
55
def get_modality_length_grouped_indices(lengths, batch_size, world_size, generator=None):
    """Group sample indices by modality and length for batched sampling.

    *lengths* encodes modality in its sign: positive entries are multimodal
    samples, negative entries are language-only samples (length is the
    absolute value). Each megabatch (``world_size * batch_size`` indices) is
    single-modality; megabatch order is shuffled, and the two leftover partial
    megabatches are merged into one trailing batch.
    """
    # We need to use torch for the random part as a distributed sampler will set the random seed for torch.
    assert all(l != 0 for l in lengths), "Should not have zero length."

    mm_indices, mm_lengths = zip(*[(i, l) for i, l in enumerate(lengths) if l > 0])
    lang_indices, lang_lengths = zip(*[(i, -l) for i, l in enumerate(lengths) if l < 0])

    assert len(mm_indices) > 0, "Should have at least one multimodal sample."
    assert len(lang_indices) > 0, "Should have at least one language sample."

    # BUGFIX: propagate `generator` to the per-modality shuffles. The original
    # passed generator=None here, silently ignoring a caller-supplied
    # generator and breaking reproducibility whenever one was provided.
    # (In-repo callers pass no generator, so their behavior is unchanged.)
    mm_shuffle = [mm_indices[i] for i in get_length_grouped_indices(mm_lengths, batch_size, world_size, generator=generator)]
    lang_shuffle = [lang_indices[i] for i in get_length_grouped_indices(lang_lengths, batch_size, world_size, generator=generator)]
    megabatch_size = world_size * batch_size
    mm_megabatches = [mm_shuffle[i : i + megabatch_size] for i in range(0, len(mm_shuffle), megabatch_size)]
    lang_megabatches = [lang_shuffle[i : i + megabatch_size] for i in range(0, len(lang_shuffle), megabatch_size)]

    # Pull off the (possibly partial) tail megabatch of each modality and
    # merge them; shuffle the order of the remaining full megabatches.
    last_mm = mm_megabatches[-1]
    last_lang = lang_megabatches[-1]
    additional_batch = last_mm + last_lang
    megabatches = mm_megabatches[:-1] + lang_megabatches[:-1]
    megabatch_indices = torch.randperm(len(megabatches), generator=generator)
    megabatches = [megabatches[i] for i in megabatch_indices]

    if len(additional_batch) >= megabatch_size:
        megabatches = [additional_batch[:megabatch_size]] + megabatches
        additional_batch = additional_batch[megabatch_size:]

    if len(additional_batch) > 0:
        megabatches.append(additional_batch)

    return [i for megabatch in megabatches for i in megabatch]
87
+
88
+
89
def get_length_grouped_indices(lengths, batch_size, world_size, generator=None, merge=True):
    """Shuffle all indices, slice the shuffled order into megabatches of
    ``world_size * batch_size``, sort each megabatch longest-first, balance it
    across ``world_size`` chunks, and return the flattened index list."""
    # We need to use torch for the random part as a distributed sampler will set the random seed for torch.
    shuffled = torch.randperm(len(lengths), generator=generator)
    megabatch_size = world_size * batch_size

    flat = []
    for start in range(0, len(lengths), megabatch_size):
        megabatch = shuffled[start : start + megabatch_size].tolist()
        # Longest-first within a megabatch keeps the per-rank chunks balanced.
        megabatch.sort(key=lambda i: lengths[i], reverse=True)
        for chunk in split_to_even_chunks(megabatch, lengths, world_size):
            flat.extend(chunk)
    return flat
98
+
99
+
100
class LengthGroupedSampler(Sampler):
    r"""
    Sampler that yields indices grouped so that samples of roughly equal
    length land in the same batch, while keeping a bit of randomness.

    Args:
        batch_size: per-device batch size.
        world_size: number of distributed processes.
        lengths: per-sample lengths (required). With ``group_by_modality``
            the sign encodes modality (positive = multimodal, negative =
            language-only).
        generator: optional ``torch.Generator`` driving the shuffles.
        group_by_modality: keep each megabatch single-modality.
    """

    def __init__(
        self,
        batch_size: int,
        world_size: int,
        lengths: Optional[List[int]] = None,
        generator=None,
        group_by_modality: bool = False,
    ):
        if lengths is None:
            raise ValueError("Lengths must be provided.")

        self.batch_size = batch_size
        self.world_size = world_size
        self.lengths = lengths
        self.generator = generator
        self.group_by_modality = group_by_modality

    def __len__(self):
        return len(self.lengths)

    def __iter__(self):
        # Pick the grouping strategy once, then delegate.
        grouper = (
            get_modality_length_grouped_indices
            if self.group_by_modality
            else get_length_grouped_indices
        )
        indices = grouper(
            self.lengths, self.batch_size, self.world_size, generator=self.generator
        )
        return iter(indices)
132
+
133
+
134
class LLaVAPhiTrainer(Trainer):
    """HF ``Trainer`` specialization for LLaVA-Phi: optionally samples the
    training set grouped by modality-aware length; checkpoint/model saving
    currently just defers to the base class."""

    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        if self.train_dataset is None or not has_length(self.train_dataset):
            return None
        if not self.args.group_by_modality_length:
            return super()._get_train_sampler()
        # Group by length while keeping each megabatch single-modality.
        return LengthGroupedSampler(
            # self.args.train_batch_size * self.args.gradient_accumulation_steps, # TODO: seems that we should not have gradient_accumulation_steps
            self.args.train_batch_size,
            world_size=self.args.world_size,
            lengths=self.train_dataset.modality_lengths,
            group_by_modality=True,
        )

    def _save_checkpoint(self, model, trial, metrics=None):
        # No custom checkpoint behavior yet; delegate to transformers.Trainer.
        super()._save_checkpoint(model, trial, metrics)

    def _save(self, output_dir: Optional[str] = None, state_dict=None):
        # No custom save behavior yet; delegate to transformers.Trainer.
        super()._save(output_dir, state_dict)