Update app.py
Browse files
app.py
CHANGED
|
@@ -1,8 +1,11 @@
|
|
| 1 |
import openai, gradio as gr, json, plotly.graph_objects as go
|
| 2 |
from pathlib import Path
|
| 3 |
|
| 4 |
-
# ---
|
| 5 |
-
|
|
|
|
|
|
|
|
|
|
| 6 |
|
| 7 |
SYSTEM_PROMPT = """
|
| 8 |
You are ZEN Multimodal Assistant by ZEN AI Co.
|
|
@@ -35,12 +38,11 @@ def multimodal_chat(api_key, user_msg, history):
|
|
| 35 |
)
|
| 36 |
assistant_content = response.choices[0].message.content.strip()
|
| 37 |
|
| 38 |
-
# Defensive: Never show the word "text" alone
|
| 39 |
if assistant_content.lower() == "text":
|
| 40 |
assistant_content = "(I'm sorry, I didn't understand. Could you rephrase?)"
|
| 41 |
|
| 42 |
img_url, fig = None, None
|
| 43 |
-
try:
|
| 44 |
parsed = json.loads(assistant_content)
|
| 45 |
if parsed.get("type") == "image":
|
| 46 |
dalle = openai.images.generate(
|
|
@@ -65,10 +67,8 @@ def multimodal_chat(api_key, user_msg, history):
|
|
| 65 |
fig.update_layout(title=parsed.get("title", "Chart"))
|
| 66 |
history.append([user_msg, parsed.get("title", "Chart below")])
|
| 67 |
else:
|
| 68 |
-
# If unexpected JSON, fallback to text
|
| 69 |
history.append([user_msg, str(assistant_content)])
|
| 70 |
except (json.JSONDecodeError, KeyError, TypeError):
|
| 71 |
-
# If not JSON, treat as text
|
| 72 |
history.append([user_msg, assistant_content])
|
| 73 |
|
| 74 |
return history, img_url, fig
|
|
|
|
| 1 |
import openai, gradio as gr, json, plotly.graph_objects as go
|
| 2 |
from pathlib import Path
|
| 3 |
|
| 4 |
+
# --- Load optional stylesheet; fall back to no custom CSS if unavailable ---
# CUSTOM_CSS is injected into the Gradio UI; an empty string means "use the
# default theme", so a missing style.css must never crash startup.
try:
    # Explicit UTF-8: CSS assets are authored as UTF-8; the platform default
    # locale encoding is not reliable across deployments.
    CUSTOM_CSS = Path("style.css").read_text(encoding="utf-8")
except (OSError, UnicodeDecodeError):
    # Narrowed from a blanket `except Exception`: only a missing/unreadable
    # file or undecodable bytes are expected here — anything else is a real
    # bug and should surface instead of being silently swallowed.
    CUSTOM_CSS = ""
|
| 9 |
|
| 10 |
SYSTEM_PROMPT = """
|
| 11 |
You are ZEN Multimodal Assistant by ZEN AI Co.
|
|
|
|
| 38 |
)
|
| 39 |
assistant_content = response.choices[0].message.content.strip()
|
| 40 |
|
|
|
|
| 41 |
if assistant_content.lower() == "text":
|
| 42 |
assistant_content = "(I'm sorry, I didn't understand. Could you rephrase?)"
|
| 43 |
|
| 44 |
img_url, fig = None, None
|
| 45 |
+
try:
|
| 46 |
parsed = json.loads(assistant_content)
|
| 47 |
if parsed.get("type") == "image":
|
| 48 |
dalle = openai.images.generate(
|
|
|
|
| 67 |
fig.update_layout(title=parsed.get("title", "Chart"))
|
| 68 |
history.append([user_msg, parsed.get("title", "Chart below")])
|
| 69 |
else:
|
|
|
|
| 70 |
history.append([user_msg, str(assistant_content)])
|
| 71 |
except (json.JSONDecodeError, KeyError, TypeError):
|
|
|
|
| 72 |
history.append([user_msg, assistant_content])
|
| 73 |
|
| 74 |
return history, img_url, fig
|