Spaces: Running on Zero
yuexiang96 committed
Commit 81cef0c
Parent(s): ab13d2e
Update app.py

app.py CHANGED
@@ -86,6 +86,8 @@ print(f"Gradio-client version: {gradio_client.__version__}")
 def get_conv_log_filename():
     t = datetime.datetime.now()
     name = os.path.join(LOGDIR, f"{t.year}-{t.month:02d}-{t.day:02d}-user_conv.json")
+    if not os.path.isfile(name):
+        os.makedirs(os.path.dirname(name), exist_ok=True)
     return name
 
 def get_conv_vote_filename():
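The first hunk makes sure the log directory exists before the path is handed back to callers. A minimal standalone sketch of the same idea (LOGDIR here is a stand-in for the app's own constant):

import datetime
import os

LOGDIR = "./logs"  # assumption: plays the same role as the app's LOGDIR

def get_conv_log_filename():
    # Build the per-day conversation log path and create its parent directory,
    # so a later open(name, "a") cannot fail with FileNotFoundError on a fresh container.
    t = datetime.datetime.now()
    name = os.path.join(LOGDIR, f"{t.year}-{t.month:02d}-{t.day:02d}-user_conv.json")
    os.makedirs(os.path.dirname(name), exist_ok=True)
    return name

os.makedirs(..., exist_ok=True) is idempotent, so the extra isfile check in the commit is harmless but not strictly required.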
@@ -249,10 +251,13 @@ def clear_history(history):
 
 def add_message(history, message):
     global chat_image_num
+    print("#### len(history)",len(history))
     if not history:
         history = []
+        print("### Initialize chatbot")
         our_chatbot = chat_manager.get_chatbot(args, model_path, tokenizer, model, image_processor, context_len)
         chat_image_num = 0
+        print("chat_image_num", chat_image_num)
 
     if len(message["files"]) <= 1:
         for x in message["files"]:
@@ -269,8 +274,8 @@ def add_message(history, message):
 
         if message["text"] is not None:
             history.append((message["text"], None))
-
-            print(f"### Chatbot instance ID: {id(our_chatbot)}")
+            print("chat_image_num", chat_image_num)
+            # print(f"### Chatbot instance ID: {id(our_chatbot)}")
         return history, gr.MultimodalTextbox(value=None, interactive=False)
     else:
         for x in message["files"]:
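The add_message hunks only add tracing, but they show the handler's overall shape. A self-contained sketch of that Gradio pattern (the chat_manager bookkeeping is omitted; names are illustrative):

import gradio as gr

def add_message(history, message):
    # message comes from a gr.MultimodalTextbox: {"text": str | None, "files": [paths]}
    history = history or []
    for path in message["files"]:
        history.append(((path,), None))          # file turns use the tuple form
    if message["text"] is not None:
        history.append((message["text"], None))  # user text, bot reply still pending
    # Return the updated history and a cleared, temporarily locked textbox.
    return history, gr.MultimodalTextbox(value=None, interactive=False)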
@@ -341,22 +346,26 @@ def bot(history, temperature, top_p, max_output_tokens):
             image_list.append(load_image(f))
         else:
             raise ValueError("Invalid image file")
-
-    image_tensor = [
-        our_chatbot.image_processor.preprocess(f, return_tensors="pt")["pixel_values"][
-            0
-        ]
-        .half()
-        .to(our_chatbot.model.device)
-        for f in image_list
-    ]
-
-
-    image_tensor = torch.stack(image_tensor)
-    image_token = DEFAULT_IMAGE_TOKEN * num_new_images
-
-    inp = text
-    inp = image_token + "\n" + inp
+
+    image_tensor = []
+    if num_new_images > 0:
+        image_tensor = [
+            our_chatbot.image_processor.preprocess(f, return_tensors="pt")["pixel_values"][
+                0
+            ]
+            .half()
+            .to(our_chatbot.model.device)
+            for f in image_list
+        ]
+
+        image_tensor = torch.stack(image_tensor)
+        image_token = DEFAULT_IMAGE_TOKEN * num_new_images
+
+        inp = text
+        inp = image_token + "\n" + inp
+    else:
+        inp = text
     our_chatbot.conversation.append_message(our_chatbot.conversation.roles[0], inp)
     # image = None
     our_chatbot.conversation.append_message(our_chatbot.conversation.roles[1], None)
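The main change in bot() is the guard around preprocessing: torch.stack raises on an empty list, so a text-only turn would previously fail here. A reduced sketch of the guarded path, with names mirroring the diff (our_chatbot, DEFAULT_IMAGE_TOKEN and num_new_images are assumed from the surrounding app):

import torch

def build_inputs(text, image_list, num_new_images, our_chatbot, DEFAULT_IMAGE_TOKEN="<image>"):
    # Only preprocess and stack when this turn actually carries new images;
    # torch.stack([]) would raise a RuntimeError.
    if num_new_images > 0:
        image_tensor = torch.stack([
            our_chatbot.image_processor.preprocess(f, return_tensors="pt")["pixel_values"][0]
            .half()
            .to(our_chatbot.model.device)
            for f in image_list
        ])
        inp = DEFAULT_IMAGE_TOKEN * num_new_images + "\n" + text
    else:
        image_tensor = None  # no image batch for a text-only turn
        inp = text
    return inp, image_tensor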
@@ -381,12 +390,12 @@ def bot(history, temperature, top_p, max_output_tokens):
     )
     print(our_chatbot.model.device)
     print(input_ids.device)
-    print(image_tensor.device)
+    # print(image_tensor.device)
 
     generate_kwargs = dict(
         inputs=input_ids,
         streamer=streamer,
-        images=image_tensor,
+        images=image_tensor if num_new_images > 0 else None,
         do_sample=True,
         temperature=temperature,
         top_p=top_p,
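Downstream, the same num_new_images flag decides whether an image batch is passed to generation at all. Since generate_kwargs carries a streamer, the call is presumably consumed with the usual threaded-streaming pattern from transformers; a hedged sketch under that assumption (tokenizer, input_ids and the sampling parameters come from the app):

from threading import Thread
from transformers import TextIteratorStreamer

def stream_reply(our_chatbot, tokenizer, input_ids, image_tensor, num_new_images,
                 temperature, top_p):
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        inputs=input_ids,
        streamer=streamer,
        images=image_tensor if num_new_images > 0 else None,  # text-only turns pass None
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
    )
    # Run generation on a worker thread; tokens arrive through the streamer.
    Thread(target=our_chatbot.model.generate, kwargs=generate_kwargs).start()
    for new_text in streamer:
        yield new_text  # caller appends this to the last chat message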