Added warning
app.py CHANGED
@@ -62,31 +62,34 @@ def bot(history_chat, text_input, image,
                        max_length,
                        min_length,
                        top_p):
-
-
-
-
-
+
+    if text_input == "":
+        gr.Warning("Please input text")
+
+    if image==None:
+        gr.Warning("Please input image or wait for image to be uploaded before clicking submit.")
+    chat_history = " ".join(history_chat) # history as a str to be passed to model
+    chat_history = chat_history + f"USER: <image>\n{text_input}\nASSISTANT:" # add text input for prompting
+    inference_result = infer(image, chat_history,
                        temperature,
                        length_penalty,
                        repetition_penalty,
                        max_length,
                        min_length,
                        top_p)
-
-
-
-
-
-
-
-
-
-
-
-
-
-        yield chat_state_list
+    # return inference and parse for new history
+    chat_val = extract_response_pairs(inference_result)
+
+    # create history list for yielding the last inference response
+    chat_state_list = copy.deepcopy(chat_val)
+    chat_state_list[-1][1] = "" # empty last response
+
+    # add characters iteratively
+    for character in chat_val[-1][1]:
+        chat_state_list[-1][1] += character
+        time.sleep(0.05)
+        # yield history but with last response being streamed
+        yield chat_state_list


 css = """
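The warning calls are the substance of this commit: gr.Warning shows a non-blocking toast in the Gradio UI, so the handler keeps running instead of raising when the text or image input is missing. The remaining added lines build the prompt, call infer, and stream the model's last reply by repeatedly yielding the growing chat history. A minimal, self-contained sketch of that streaming pattern (the function name and sample chat pair below are illustrative, not part of the Space's app.py):

import copy
import time

def stream_last_response(chat_pairs):
    # Start from the full history, then blank out the last assistant reply
    state = copy.deepcopy(chat_pairs)
    full_reply = chat_pairs[-1][1]
    state[-1][1] = ""
    # Re-yield the whole history while the last reply grows one character at a time
    for character in full_reply:
        state[-1][1] += character
        time.sleep(0.05)  # small delay so the UI appears to "type"
        yield state

# Hypothetical usage: each yielded snapshot is what a gr.Chatbot would render next
for snapshot in stream_last_response([["What is in the image?", "A cat on a sofa."]]):
    print(snapshot[-1][1])

Because bot is a generator, Gradio treats each yield from the event handler as a streaming update to the bound output component, which is what produces the typing effect in the Chatbot.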