aaaaa
app.py CHANGED
@@ -13,7 +13,7 @@ stop_inference = False
 
 def respond(
     message,
-    # history: list[tuple[str, str]],
+    history: list[tuple[str, str]],
     system_message,
     max_tokens,
     temperature,
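For reference, Gradio's tuple-format chat history delivers each prior exchange as a `(user_message, assistant_reply)` pair, which is what the restored `history: list[tuple[str, str]]` annotation encodes. A minimal sketch of such a value (the contents are hypothetical, not from the app):

```python
# Hypothetical example of the tuple-format history respond() now receives.
history: list[tuple[str, str]] = [
    ("Hi, who are you?", "I'm a chatbot assistant."),
    ("What can you help with?", "Questions, summaries, and general chat."),
]
```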
@@ -26,11 +26,11 @@ def respond(
     if use_local_model:
         # Simulate local inference (ignoring history)
         messages = [{"role": "system", "content": system_message}]
-        # for val in history:
-        #     if val[0]:
-        #         messages.append({"role": "user", "content": val[0]})
-        #     if val[1]:
-        #         messages.append({"role": "assistant", "content": val[1]})
+        for val in history:
+            if val[0]:
+                messages.append({"role": "user", "content": val[0]})
+            if val[1]:
+                messages.append({"role": "assistant", "content": val[1]})
         messages.append({"role": "user", "content": message})
 
         response = ""
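The uncommented loop replays that history as OpenAI-style role/content dicts before appending the new user message, skipping empty slots; note the `# Simulate local inference (ignoring history)` comment is now stale. A self-contained sketch of the same technique, with hypothetical sample data:

```python
# Standalone sketch of the history-replay technique from this commit.
# All sample values are hypothetical, not taken from app.py.
system_message = "You are a friendly chatbot."
message = "And what is its capital?"
history = [("Tell me about France.", "France is a country in Western Europe.")]

messages = [{"role": "system", "content": system_message}]
for user_msg, assistant_msg in history:
    if user_msg:       # skip empty user turns
        messages.append({"role": "user", "content": user_msg})
    if assistant_msg:  # skip empty or still-pending assistant turns
        messages.append({"role": "assistant", "content": assistant_msg})
messages.append({"role": "user", "content": message})

print(messages)  # system, user, assistant, user: ready for a chat API
```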
@@ -48,11 +48,11 @@ def respond(
     else:
         # API-based inference (ignoring history)
         messages = [{"role": "system", "content": system_message}]
-        # for val in history:
-        #     if val[0]:
-        #         messages.append({"role": "user", "content": val[0]})
-        #     if val[1]:
-        #         messages.append({"role": "assistant", "content": val[1]})
+        for val in history:
+            if val[0]:
+                messages.append({"role": "user", "content": val[0]})
+            if val[1]:
+                messages.append({"role": "assistant", "content": val[1]})
         messages.append({"role": "user", "content": message})
 
         response = ""
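The `else:` branch gains an identical loop, so both branches now assemble `messages` the same way. One possible cleanup, not part of this commit, would be to factor the duplicated block into a helper; the name `build_messages` is invented:

```python
def build_messages(system_message: str,
                   history: list[tuple[str, str]],
                   message: str) -> list[dict]:
    """Assemble OpenAI-style chat messages from tuple-format history.

    Hypothetical refactor: both the local and API branches of respond()
    could call this instead of repeating the loop.
    """
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})
    return messages
```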
@@ -140,21 +140,22 @@ with gr.Blocks(css=custom_css) as demo:
 
     cancel_button = gr.Button("Cancel Inference", variant="danger")
 
-    def chat_fn(message):
-        response_gen = respond(
-            message,
-            # history: list[tuple[str, str]],
-            system_message.value,
-            max_tokens.value,
-            temperature.value,
-            top_p.value,
-            use_local_model.value,
-        )
-        full_response = ""
-        for response in response_gen:
-            full_response += response  # Accumulate the full response
-
-        return full_response
+    # def chat_fn(message):
+    #     response_gen = respond(
+    #         message,
+    #         # history: list[tuple[str, str]],
+    #         system_message.value,
+    #         max_tokens.value,
+    #         temperature.value,
+    #         top_p.value,
+    #         use_local_model.value,
+    #     )
+    #     full_response = ""
+    #     for response in response_gen:
+    #         full_response += response  # Accumulate the full response
+
+    #     return full_response
+    chat_fn = respond
 
     user_input.submit(chat_fn, inputs=user_input, outputs=chat_history)
     cancel_button.click(cancel_inference)
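With `chat_fn = respond`, the unchanged `user_input.submit(chat_fn, inputs=user_input, outputs=chat_history)` call still supplies a single input, while `respond` now also expects history and the sampling settings. A hedged sketch of one way to reconcile them (component names mirror those visible in the diff; `top_p` is assumed from the commented-out wrapper):

```python
# Hypothetical wiring, not part of the commit: pass every argument of
# respond() as a Gradio input so .submit() matches the new signature.
user_input.submit(
    respond,
    inputs=[user_input, chat_history, system_message,
            max_tokens, temperature, top_p, use_local_model],
    outputs=chat_history,
)
```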