Update app.py

app.py CHANGED
```diff
@@ -1,5 +1,4 @@
 from transformers import MllamaForConditionalGeneration, AutoProcessor, TextIteratorStreamer
-from transformers import StoppingCriteria, StoppingCriteriaList
 from PIL import Image
 import requests
 import torch
@@ -9,161 +8,12 @@ from gradio import FileData
 import time
 import spaces
 import re
-import copy
-
 ckpt = "Xkev/Llama-3.2V-11B-cot"
 model = MllamaForConditionalGeneration.from_pretrained(ckpt,
                                                        torch_dtype=torch.bfloat16).to("cuda")
 processor = AutoProcessor.from_pretrained(ckpt)


```
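The model is loaded once at import time in bfloat16 and pinned to a single GPU (at roughly 2 bytes per parameter, the 11B checkpoint needs about 22 GB of VRAM). A minimal sketch of an equivalent load that lets accelerate place the weights instead of an explicit `.to("cuda")`; `device_map` is a standard `from_pretrained` argument, but this variant is not part of the commit:

```python
# Sketch only, not from this commit: equivalent load with automatic
# weight placement via accelerate instead of an explicit .to("cuda").
import torch
from transformers import AutoProcessor, MllamaForConditionalGeneration

ckpt = "Xkev/Llama-3.2V-11B-cot"
model = MllamaForConditionalGeneration.from_pretrained(
    ckpt,
    torch_dtype=torch.bfloat16,  # halves memory vs. float32
    device_map="auto",           # requires `accelerate` to be installed
)
processor = AutoProcessor.from_pretrained(ckpt)
```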
```diff
-class StopOnStrings(StoppingCriteria):
-    def __init__(self, stop_strings, tokenizer):
-        self.stop_strings = stop_strings
-        self.tokenizer = tokenizer
-
-    def __call__(self, input_ids, scores, **kwargs):
-        generated_text = self.tokenizer.decode(input_ids[0], skip_special_tokens=True)
-        for stop_string in self.stop_strings:
-            if stop_string in generated_text:
-                return True
-        return False
-
```
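The removed StopOnStrings criterion decodes the running output after every step and halts generation once any marker string appears. A minimal usage sketch (`processor`, `model`, and `inputs` as defined in the surrounding file; newer transformers releases also expose a built-in `stop_strings` argument on `generate` that serves the same purpose):

```python
# Sketch: wiring a stop-on-string criterion into generate().
# `processor`, `model`, and `inputs` are assumed from the surrounding file.
from transformers import StoppingCriteriaList

stop_criteria = StoppingCriteriaList(
    [StopOnStrings(["</SUMMARY>"], processor.tokenizer)]
)
output = model.generate(
    **inputs,
    stopping_criteria=stop_criteria,  # halt once the summary stage closes
    max_new_tokens=2048,
)
```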
```diff
-def judge(image, prompt, outputs, type="summary"):
-    input_outputs = []
-    kwargs = dict(do_sample=True, max_new_tokens=2048, temperature=0.6, top_p=0.9)
-
-    hint = None
-    if type == "all":
-        judge_prompt = f'Now you act as a judge, helping me determine which of the two texts I provide better answers the question.'
-        recall_prompt = ""
-        for output in outputs:
-            input_outputs.append(output)
-    elif type == "sentence":
-        judge_prompt = f'Now you act as a judge, helping me determine which of the two texts I provide is a better next sentence for the answer to the question.'
-        recall_prompt = ""
-        for output in outputs:
-            sentences = output.split(".")
-            if len(sentences) > 2:
-                hint = ' '.join(sentences[:-2])
-                input_outputs.append(sentences[-2])
-    elif type == "summary":
-        judge_prompt = f'Now you act as a judge, helping me determine which of the two texts I provide better provides a summary of what it should do to solve the question. The summary should focus on outlining the main approach instead of stating specific analytical reasoning or math formula.'
-        recall_prompt = f'Please note that a better summary should focus on outlining the main approach instead of stating specific analytical reasoning or math formula.'
-        for output in outputs:
-            input_match = re.search(r'<SUMMARY>(.*?)</SUMMARY>', output, re.DOTALL)
-            if input_match:
-                input_outputs.append(input_match.group(1))
-    elif type == "caption":
-        judge_prompt = f'Now you act as a judge, helping me determine which of the two texts I provide better summarizes the information in the image related to the question, and has fewer errors. It is essential that the captions are as thorough as possible while remaining accurate, capturing as many details as possible rather than providing only general commentary.'
-        recall_prompt = f'Please note that a better caption should be as thorough as possible while remaining accurate, capturing as many details as possible rather than providing only general commentary.'
-        for output in outputs:
-            input_match = re.search(r'<CAPTION>(.*?)</CAPTION>', output, re.DOTALL)
-            if input_match:
-                hint_match = re.search(r'<SUMMARY>(.*?)</SUMMARY>', output, re.DOTALL)
-                if hint_match:
-                    input_outputs.append(input_match.group(1))
-    elif type == "reasoning":
-        judge_prompt = f'Now you act as a judge, helping me determine which of the two texts I provide better explains the reasoning process to solve the question, and has fewer errors. Begin by thoroughly reviewing the question, followed by an in-depth examination of each answer individually, noting any differences. Subsequently, analyze these differences to determine which response demonstrates stronger reasoning and provide a clear conclusion.'
-        recall_prompt = f'Begin by thoroughly reviewing the question, followed by an in-depth examination of each answer individually, noting any differences. Subsequently, analyze these differences to determine which response demonstrates stronger reasoning and provide a clear conclusion.'
-        for output in outputs:
-            input_match = re.search(r'<REASONING>(.*?)</REASONING>', output, re.DOTALL)
-            if input_match:
-                hint_match = re.search(r'<SUMMARY>(.*?)</SUMMARY>', output, re.DOTALL)
-                if hint_match:
-                    hint_caption_match = re.search(r'<CAPTION>(.*?)</CAPTION>', output, re.DOTALL)
-                    if hint_caption_match:
-                        hint = hint_caption_match.group(1)
-                        input_outputs.append(input_match.group(1))
-    elif type == "conclusion":
-        judge_prompt = f'Now you act as a judge, helping me determine which of the two texts I provide offers a more effective conclusion to the question. The conclusion should align with the reasoning presented in the hint. The conclusion should never refuse to answer the question.'
-        recall_prompt = f'Please note that a better conclusion should align with the reasoning presented in the hint. The conclusion should never refuse to answer the question.'
-        for output in outputs:
-            input_match = re.search(r'<CONCLUSION>(.*?)</CONCLUSION>', output, re.DOTALL)
-            if input_match:
-                hint_match = re.search(r'<SUMMARY>(.*?)</SUMMARY>', output, re.DOTALL)
-                if hint_match:
-                    hint_caption_match = re.search(r'<CAPTION>(.*?)</CAPTION>', output, re.DOTALL)
-                    if hint_caption_match:
-                        hint_reasoning_match = re.search(r'<REASONING>(.*?)</REASONING>', output, re.DOTALL)
-                        if hint_reasoning_match:
-                            hint = hint_caption_match.group(1) + hint_reasoning_match.group(1)
-                            input_outputs.append(input_match.group(1))
-
```
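Each branch isolates one stage of the structured output with a non-greedy `re.search` under `re.DOTALL`, so the tag body may span multiple lines. A self-contained sketch of that extraction (the helper name is illustrative):

```python
import re

def extract_stage(text: str, stage: str):
    """Return the body of the first <STAGE>...</STAGE> block, or None."""
    m = re.search(rf"<{stage}>(.*?)</{stage}>", text, re.DOTALL)
    return m.group(1) if m else None

demo = "<SUMMARY>Outline the approach.</SUMMARY><CAPTION>A red flower.</CAPTION>"
assert extract_stage(demo, "SUMMARY") == "Outline the approach."
assert extract_stage(demo, "REASONING") is None
```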
```diff
-    if type == "reasoning":
-        reasoning_prompt = f"""Now you act as a judge, helping me determine whether the reasoning process in the given text is correct and accurate based on the given information.
-You should assume that the given information about the image is correct.
-You should only consider the reasoning process itself, not the correctness of the background information.
-If the reasoning process involves any calculations, you should verify the accuracy of the calculations.
-You should output 'correct' if you don't find any errors in the reasoning process, and 'incorrect' if you find any errors."""
-
-        reasoning_prompt_1 = reasoning_prompt + f'\n\nGiven Information: {hint}' + f'\n\nReasoning Process: {input_outputs[0]}'
-        reasoning_message_1 = [
-            {'role': 'user', 'content': [
-                {'type': 'text', 'text': reasoning_prompt_1}
-            ]}
-        ]
-        reasoning_input_text_1 = processor.apply_chat_template(reasoning_message_1, add_generation_prompt=True)
-        reasoning_inputs_1 = processor(None, reasoning_input_text_1, return_tensors='pt')
-        reasoning_output_1 = model.generate(**reasoning_inputs_1, **kwargs)
-        reasoning_output_text_1 = processor.decode(reasoning_output_1[0][reasoning_inputs_1['input_ids'].shape[1]:]).replace('<|eot_id|>', '').replace('<|endoftext|>', '')
-        if "incorrect" in reasoning_output_text_1:
-            # logging
-            with open('log.jsonl', 'a') as f:
-                json_obj = {
-                    "prompt": prompt,
-                    "outputs": outputs,
-                    "judge_output": reasoning_output_text_1
-                }
-                f.write(json.dumps(json_obj) + '\n')
-            return 1
-
-        reasoning_prompt_2 = reasoning_prompt + f'\n\nGiven Information: {hint}' + f'\n\nReasoning Process: {input_outputs[1]}'
-        reasoning_message_2 = [
-            {'role': 'user', 'content': [
-                {'type': 'text', 'text': reasoning_prompt_2}
-            ]}
-        ]
-        reasoning_input_text_2 = processor.apply_chat_template(reasoning_message_2, add_generation_prompt=True)
-        reasoning_inputs_2 = processor(None, reasoning_input_text_2, return_tensors='pt')
-        reasoning_output_2 = model.generate(**reasoning_inputs_2, **kwargs)
-        reasoning_output_text_2 = processor.decode(reasoning_output_2[0][reasoning_inputs_2['input_ids'].shape[1]:]).replace('<|eot_id|>', '').replace('<|endoftext|>', '')
-        if "incorrect" in reasoning_output_text_2:
-            # logging
-            with open('log.jsonl', 'a') as f:
-                json_obj = {
-                    "prompt": prompt,
-                    "outputs": outputs,
-                    "judge_output": reasoning_output_text_2
-                }
-                f.write(json.dumps(json_obj) + '\n')
-            return 0
-
```
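When the verifier's reply contains "incorrect", the losing candidate is logged to log.jsonl. The code calls `json.dumps`, though no `import json` appears in the shown hunks (it may sit in the elided import lines; if not, this path would raise a NameError). A self-contained sketch of the logging (the function name is illustrative):

```python
import json

def log_judgement(path, prompt, outputs, judge_output):
    # Append one JSON object per line (JSONL) for later inspection.
    with open(path, "a") as f:
        f.write(json.dumps({
            "prompt": prompt,
            "outputs": outputs,
            "judge_output": judge_output,
        }) + "\n")

log_judgement("log.jsonl", "Q?", ["answer A", "answer B"], "incorrect: ...")
```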
```diff
-    judge_prompt += f'\n\nQuestion: {prompt}'
-    if hint:
-        judge_prompt += f'\n\nHint about the Question: {hint}'
-    for i, output in enumerate(input_outputs):
-        judge_prompt += f'\nResponse {i+1}: {output}'
-    judge_prompt += f'\n\n{recall_prompt}'
-    judge_prompt += f' Please strictly follow the following format requirements when outputting, and don’t have any other unnecessary words.'
-    judge_prompt += f'\n\nOutput format: "Since [reason], I choose response [1/2]."'
-
-    judge_message = [
-        {'role': 'user', 'content': [
-            {'type': 'image'},
-            {'type': 'text', 'text': judge_prompt}
-        ]}
-    ]
-    judge_input_text = processor.apply_chat_template(judge_message, add_generation_prompt=True)
-    judge_inputs = processor(image, judge_input_text, return_tensors='pt')
-    judge_output = model.generate(**judge_inputs, **kwargs)
-    judge_output_text = processor.decode(judge_output[0][judge_inputs['input_ids'].shape[1]:]).replace('<|eot_id|>', '').replace('<|endoftext|>', '')
-
-    if "I choose response 1" in judge_output_text:
-        return 0
-    else:
-        return 1
-
```
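The verdict is decided by a plain substring test: any reply containing "I choose response 1" selects the first candidate, and everything else falls through to the second. A small sketch of that parsing rule:

```python
def parse_verdict(judge_output_text: str) -> int:
    # Mirrors the removed logic: index 0 for response 1, else index 1.
    return 0 if "I choose response 1" in judge_output_text else 1

assert parse_verdict("Since it is clearer, I choose response 1.") == 0
assert parse_verdict("Since it is more accurate, I choose response 2.") == 1
```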
```diff
 @spaces.GPU
 def bot_streaming(message, history, max_new_tokens=250):

@@ -210,64 +60,20 @@ def bot_streaming(message, history, max_new_tokens=250):

     generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=max_new_tokens, do_sample=True, temperature=0.6, top_p=0.9)
     generated_text = ""
-
-    stages = ['<SUMMARY>', '<CAPTION>', '<REASONING>', '<CONCLUSION>']
-    end_markers = ['</SUMMARY>', '</CAPTION>', '</REASONING>', '</CONCLUSION>']
-
-    initial_length = len(inputs['input_ids'][0])
-    input_ids = copy.deepcopy(inputs['input_ids'])
-
-    for stage, end_marker in zip(stages, end_markers):
-        stop_criteria = StoppingCriteriaList([StopOnStrings([end_marker], processor.tokenizer)])
-
-        candidates = []
-        for _ in range(2):
-            generation_kwargs.update({
-                'stopping_criteria': stop_criteria
-            })
-
-            inputs = processor(image, input_ids, return_tensors='pt')
-            output = model.generate(**inputs, **generation_kwargs)
-
-            new_generated_ids = output[0]
-
-            generated_text = processor.tokenizer.decode(new_generated_ids[initial_length:], skip_special_tokens=True)
-
-            candidates.append({
-                'input_ids': new_generated_ids.unsqueeze(0),
-                'generated_text': generated_text,
-            })
-
-        while(len(candidates) > 1):
-            candidate1 = candidates.pop(np.random.randint(len(candidates)))
-            candidate2 = candidates.pop(np.random.randint(len(candidates)))
-            outputs = [candidate1['generated_text'], candidate2['generated_text']]
-            best_index = judge(image, prompt, outputs, type=stage[1:-1].lower())
-            if best_index == 0:
-                candidates.append(candidate1)
-            else:
-                candidates.append(candidate2)
-
-        input_ids = candidates[0]['input_ids']
-
-    final_output = processor.tokenizer.decode(input_ids[0][initial_length:], skip_special_tokens=True)
-    final_output = re.sub(r"<(\w+)>", r"(Here begins the \1 stage)", final_output)
-    final_output = re.sub(r"</(\w+)>", r"(Here ends the \1 stage)", final_output)
-    return final_output
-
-    # thread = Thread(target=model.generate, kwargs=generation_kwargs)
-    # thread.start()
-    # buffer = ""
+    thread = Thread(target=model.generate, kwargs=generation_kwargs)
+    thread.start()
+    buffer = ""
+
+    for new_text in streamer:
+        buffer += new_text
+        generated_text_without_prompt = buffer
+        time.sleep(0.01)
+
+        buffer = re.sub(r"<(\w+)>", r"(Here begins the \1 stage)", buffer)
+        buffer = re.sub(r"</(\w+)>", r"(Here ends the \1 stage)", buffer)
+
+        yield buffer
-
-
-
```
demo = gr.ChatInterface(fn=bot_streaming, title="LLaVA-CoT",
|
@@ -281,8 +87,7 @@ demo = gr.ChatInterface(fn=bot_streaming, title="LLaVA-CoT",
|
|
281 |
)
|
282 |
],
|
283 |
examples=[[{"text": "What is on the flower?", "files": ["./Example1.webp"]},512],
|
284 |
-
[{"text": "How to make this pastry?", "files": ["./Example2.png"]},512],
|
285 |
-
[{"text": f"Hint: Please answer the question and provide the correct option letter, e.g., A, B, C, D, at the end.\n Question: Subtract all tiny shiny balls. Subtract all purple objects. How many objects are left?\n Options:\n A. 4\n B. 8\n C. 2\n D. 6", "files": ["./reasoning.png"]},2048]],
|
286 |
cache_examples=False,
|
287 |
description="Upload an image, and start chatting about it. To learn more about LLaVA-CoT, visit [our GitHub page](https://github.com/PKU-YuanGroup/LLaVA-CoT). Note: Since Gradio currently does not support displaying the special markings in the output, we have replaced it with the expression (Here begins the X phase).",
|
288 |
stop_btn="Stop Generation",
|
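After this change, bot_streaming yields a growing buffer that gr.ChatInterface renders incrementally; the two re.sub calls rewrite the stage tags into plain text because Gradio does not display the special markers. A minimal sketch of the same streaming pattern with a stub generator (values illustrative, not the full configuration from this file):

```python
import gradio as gr

def stream_demo(message, history):
    # A multimodal ChatInterface passes {"text": ..., "files": [...]}.
    text = message["text"] if isinstance(message, dict) else message
    buffer = ""
    for ch in text:          # stand-in for tokens from TextIteratorStreamer
        buffer += ch
        yield buffer         # each yield re-renders the partial reply

demo = gr.ChatInterface(fn=stream_demo, title="LLaVA-CoT", multimodal=True)

if __name__ == "__main__":
    demo.launch()
```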