Spaces: Running on Zero
Benjamin-eecs committed
Commit 0637725 • Parent(s): 15710ee

fix: minor fix

Browse files
- app.py +8 -3
- app_modules/presets.py +11 -1
app.py
CHANGED
@@ -112,6 +112,11 @@ def generate_prompt_with_history(
             else current_prompt
         )

+        if current_prompt.count("<image_placeholder>") > 2:
+            for _ in range(len(conversation_copy.messages) - 2):
+                conversation_copy.messages.pop(0)
+            return conversation_copy
+
         if torch.tensor(tokenizer.encode(current_prompt)).size(-1) <= max_length:
             return conversation_copy

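The added block bounds the prompt on the A10 by dropping the oldest turns once the prompt contains more than two <image_placeholder> tags. A minimal standalone sketch of the same idea, using a hypothetical Conversation dataclass in place of the demo's real conversation object:

from dataclasses import dataclass, field

@dataclass
class Conversation:
    # Hypothetical stand-in for the demo's conversation object; the real
    # class in the Space stores role/content pairs.
    messages: list[str] = field(default_factory=list)

def trim_history(conv: Conversation, max_images: int = 2) -> Conversation:
    # Count image placeholders across the whole prompt; if the cap is
    # exceeded, drop the oldest messages so only the last two remain,
    # mirroring the logic added in this commit.
    prompt = "\n".join(conv.messages)
    if prompt.count("<image_placeholder>") > max_images:
        for _ in range(len(conv.messages) - 2):
            conv.messages.pop(0)
    return conv

# Example: three image turns collapse to the two most recent messages.
conv = Conversation(messages=[
    "<image_placeholder> describe this",
    "<image_placeholder> and this",
    "<image_placeholder> compare them",
])
assert len(trim_history(conv).messages) == 2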
@@ -389,7 +394,7 @@ def build_demo(MODELS):
                 )
                 max_length_tokens = gr.Slider(
                     minimum=0,
-                    maximum=
+                    maximum=2048,
                     value=2048,
                     step=8,
                     interactive=True,
@@ -397,8 +402,8 @@
                 )
                 max_context_length_tokens = gr.Slider(
                     minimum=0,
-                    maximum=
-                    value=
+                    maximum=2048,
+                    value=2048,
                     step=128,
                     interactive=True,
                     label="Max History Tokens",
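Both sliders now share a 2048 cap, matching the memory note added to presets.py below. For context, a minimal, self-contained sketch of how sliders like these are typically passed into a Gradio event handler; run_chat, the textboxes, and the "Max Generate Tokens" label are placeholders rather than the Space's actual components:

import gradio as gr

def run_chat(message, max_length_tokens, max_context_length_tokens):
    # Placeholder handler: a real app would trim the history to fit
    # max_context_length_tokens and generate up to max_length_tokens.
    return (f"(would answer '{message}' with <= {int(max_length_tokens)} new tokens, "
            f"history capped at {int(max_context_length_tokens)} tokens)")

with gr.Blocks() as demo:
    msg = gr.Textbox(label="Message")
    out = gr.Textbox(label="Response")
    max_length_tokens = gr.Slider(
        minimum=0, maximum=2048, value=2048, step=8,
        interactive=True, label="Max Generate Tokens",  # label assumed
    )
    max_context_length_tokens = gr.Slider(
        minimum=0, maximum=2048, value=2048, step=128,
        interactive=True, label="Max History Tokens",
    )
    # Slider values arrive in the handler as plain numbers.
    msg.submit(run_chat, [msg, max_length_tokens, max_context_length_tokens], out)

demo.launch()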
app_modules/presets.py
CHANGED
@@ -20,7 +20,17 @@
 # -*- coding:utf-8 -*-
 import gradio as gr

-title = """
+title = """
+<h1 align="left" style="min-width:200px; margin-top:0;">Chat with DeepSeek-VL</h1>
+<p>
+<a href="https://arxiv.org/abs/2403.05525" target="_blank">Paper</a> |
+<a href="https://github.com/deepseek-ai/DeepSeek-VL" target="_blank">GitHub</a>
+</p>
+<p style="color: red;">
+Note: Due to the limited memory of the A10 GPU, the demo supports a maximum of two images and 2048 tokens.
+</p>
+"""
+
 description_top = """"""
 description = """"""
 CONCURRENT_COUNT = 10
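Since the new title is raw HTML, it needs a component that renders markup rather than escaping it. A minimal sketch, assuming the Space displays the presets with gr.HTML and gr.Markdown inside its Blocks layout (the actual wiring in app.py may differ):

import gradio as gr
from app_modules.presets import title, description_top, description

with gr.Blocks() as demo:
    # gr.HTML renders the heading, the Paper/GitHub links, and the red
    # GPU-memory note without escaping the markup.
    gr.HTML(title)
    gr.Markdown(description_top)
    # ... chatbot, image upload, and the token sliders would go here ...
    gr.Markdown(description)

demo.launch()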