Update app.py
app.py (CHANGED)
@@ -2,6 +2,12 @@ import gradio as gr
import os
import re
from groq import Groq
+import pandas as pd
+import matplotlib.pyplot as plt
+import io
+import base64
+from datetime import datetime, timedelta
+import json

def validate_api_key(api_key):
    """Validate if the API key has the correct format."""
@@ -32,31 +38,81 @@ def test_api_connection(api_key):
        else:
            return False, f"Error connecting to Groq API: {str(e)}"

+# Ensure analytics directory exists
+os.makedirs("analytics", exist_ok=True)
+
+def log_chat_interaction(model, tokens_used, response_time, user_message_length):
+    """Log chat interactions for analytics"""
+    timestamp = datetime.now().isoformat()
+
+    log_file = "analytics/chat_log.json"
+
+    log_entry = {
+        "timestamp": timestamp,
+        "model": model,
+        "tokens_used": tokens_used,
+        "response_time_sec": response_time,
+        "user_message_length": user_message_length
+    }
+
+    # Append to existing log or create new file
+    if os.path.exists(log_file):
+        try:
+            with open(log_file, "r") as f:
+                logs = json.load(f)
+        except:
+            logs = []
+    else:
+        logs = []
+
+    logs.append(log_entry)
+
+    with open(log_file, "w") as f:
+        json.dump(logs, f, indent=2)
+
+def get_template_prompt(template_name):
+    """Get system prompt for a given template name"""
+    templates = {
+        "": "",  # Empty template
+        "General Assistant": "You are a helpful, harmless, and honest AI assistant.",
+        "Code Helper": "You are a programming assistant. Provide detailed code explanations and examples.",
+        "Creative Writer": "You are a creative writing assistant. Generate engaging and imaginative content.",
+        "Technical Expert": "You are a technical expert. Provide accurate, detailed technical information.",
+        "Data Analyst": "You are a data analysis assistant. Help interpret and analyze data effectively."
+    }
+
+    return templates.get(template_name, "")
+
+def enhanced_chat_with_groq(api_key, model, user_message, temperature, max_tokens, top_p, chat_history, template_name=""):
+    """Enhanced chat function with analytics logging"""
+    start_time = datetime.now()
+
+    # Get system prompt if template is provided
+    system_prompt = get_template_prompt(template_name) if template_name else ""
+
+    # Validate and process as before
    is_valid, message = validate_api_key(api_key)
    if not is_valid:
        return chat_history + [[user_message, f"Error: {message}"]]

    connection_valid, connection_message = test_api_connection(api_key)
    if not connection_valid:
        return chat_history + [[user_message, f"Error: {connection_message}"]]

    try:
+        # Format history
        messages = []
+
+        if system_prompt:
+            messages.append({"role": "system", "content": system_prompt})
+
        for human, assistant in chat_history:
            messages.append({"role": "user", "content": human})
            messages.append({"role": "assistant", "content": assistant})

        messages.append({"role": "user", "content": user_message})

+        # Make API call
        client = Groq(api_key=api_key)
        response = client.chat.completions.create(
            model=model,
@@ -66,10 +122,22 @@ def chat_with_groq(api_key, model, user_message, temperature, max_tokens, top_p,
            top_p=top_p
        )

+        # Calculate metrics
+        end_time = datetime.now()
+        response_time = (end_time - start_time).total_seconds()
+        tokens_used = response.usage.total_tokens
+
+        # Log the interaction
+        log_chat_interaction(
+            model=model,
+            tokens_used=tokens_used,
+            response_time=response_time,
+            user_message_length=len(user_message)
+        )
+
+        # Extract response
        assistant_response = response.choices[0].message.content

        return chat_history + [[user_message, assistant_response]]

    except Exception as e:
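For reference, each call to log_chat_interaction in the hunks above appends one JSON object to analytics/chat_log.json. An entry would look roughly like the following (field names come from the code; the values here are purely illustrative):

    {
      "timestamp": "2025-01-15T10:32:07.123456",
      "model": "llama3-70b-8192",
      "tokens_used": 512,
      "response_time_sec": 1.42,
      "user_message_length": 87
    }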
@@ -80,6 +148,101 @@ def clear_conversation():
    """Clear the conversation history."""
    return []

+def generate_analytics():
+    """Generate analytics from the chat log"""
+    log_file = "analytics/chat_log.json"
+
+    if not os.path.exists(log_file):
+        return "No analytics data available yet.", None, None, None, None
+
+    try:
+        with open(log_file, "r") as f:
+            logs = json.load(f)
+
+        if not logs:
+            return "No analytics data available yet.", None, None, None, None
+
+        # Convert to DataFrame
+        df = pd.DataFrame(logs)
+        df["timestamp"] = pd.to_datetime(df["timestamp"])
+
+        # Generate usage by model chart
+        model_usage = df.groupby("model").agg({
+            "tokens_used": "sum",
+            "timestamp": "count"
+        }).reset_index()
+        model_usage.columns = ["model", "total_tokens", "request_count"]
+
+        plt.figure(figsize=(10, 6))
+        plt.bar(model_usage["model"], model_usage["total_tokens"])
+        plt.title("Token Usage by Model")
+        plt.xlabel("Model")
+        plt.ylabel("Total Tokens Used")
+        plt.xticks(rotation=45)
+
+        model_usage_img = io.BytesIO()
+        plt.savefig(model_usage_img, format="png", bbox_inches="tight")
+        model_usage_img.seek(0)
+        model_usage_b64 = base64.b64encode(model_usage_img.read()).decode("utf-8")
+        plt.close()
+
+        # Generate usage over time chart
+        df["date"] = df["timestamp"].dt.date
+        daily_usage = df.groupby("date").agg({
+            "tokens_used": "sum"
+        }).reset_index()
+
+        plt.figure(figsize=(10, 6))
+        plt.plot(daily_usage["date"], daily_usage["tokens_used"], marker="o")
+        plt.title("Daily Token Usage")
+        plt.xlabel("Date")
+        plt.ylabel("Tokens Used")
+        plt.grid(True)
+
+        daily_usage_img = io.BytesIO()
+        plt.savefig(daily_usage_img, format="png", bbox_inches="tight")
+        daily_usage_img.seek(0)
+        daily_usage_b64 = base64.b64encode(daily_usage_img.read()).decode("utf-8")
+        plt.close()
+
+        # Generate response time chart
+        model_response_time = df.groupby("model").agg({
+            "response_time_sec": "mean"
+        }).reset_index()
+
+        plt.figure(figsize=(10, 6))
+        plt.bar(model_response_time["model"], model_response_time["response_time_sec"])
+        plt.title("Average Response Time by Model")
+        plt.xlabel("Model")
+        plt.ylabel("Response Time (seconds)")
+        plt.xticks(rotation=45)
+
+        response_time_img = io.BytesIO()
+        plt.savefig(response_time_img, format="png", bbox_inches="tight")
+        response_time_img.seek(0)
+        response_time_b64 = base64.b64encode(response_time_img.read()).decode("utf-8")
+        plt.close()
+
+        # Summary statistics
+        total_tokens = df["tokens_used"].sum()
+        total_requests = len(df)
+        avg_response_time = df["response_time_sec"].mean()
+
+        summary = f"""
+        ## Analytics Summary
+
+        - **Total API Requests**: {total_requests}
+        - **Total Tokens Used**: {total_tokens:,}
+        - **Average Response Time**: {avg_response_time:.2f} seconds
+        - **Most Used Model**: {model_usage.iloc[model_usage["request_count"].argmax()]["model"]}
+        - **Date Range**: {df["timestamp"].min().date()} to {df["timestamp"].max().date()}
+        """
+
+        return summary, model_usage_b64, daily_usage_b64, response_time_b64, df.to_dict("records")
+
+    except Exception as e:
+        return f"Error generating analytics: {str(e)}", None, None, None, None
+
# Define available models
models = [
    "llama3-70b-8192",
@@ -89,102 +252,133 @@ models = [
    "allam-2-7b"
]

+# Define templates
+templates = ["", "General Assistant", "Code Helper", "Creative Writer", "Technical Expert", "Data Analyst"]
+
# Create the Gradio interface
with gr.Blocks(title="Groq AI Chat Playground") as app:
    gr.Markdown("# Groq AI Chat Playground")
+
+    # Create tabs for Chat and Analytics
+    with gr.Tabs():
+        with gr.Tab("Chat"):
+            # New model information accordion
+            with gr.Accordion("ℹ️ Model Information - Learn about available models", open=False):
+                gr.Markdown("""
+                ### Available Models and Use Cases
+
+                **llama3-70b-8192**
+                - Meta's most powerful language model
+                - 70 billion parameters with 8192 token context window
+                - Best for: Complex reasoning, sophisticated content generation, creative writing, and detailed analysis
+                - Optimal for users needing the highest quality AI responses
+
+                **llama3-8b-8192**
+                - Lighter version of Llama 3
+                - 8 billion parameters with 8192 token context window
+                - Best for: Faster responses, everyday tasks, simpler queries
+                - Good balance between performance and speed
+
+                **mistral-saba-24b**
+                - Mistral AI's advanced model
+                - 24 billion parameters
+                - Best for: High-quality reasoning, code generation, and structured outputs
+                - Excellent for technical and professional use cases
+
+                **gemma2-9b-it**
+                - Google's instruction-tuned model
+                - 9 billion parameters
+                - Best for: Following specific instructions, educational content, and general knowledge queries
+                - Well-rounded performance for various tasks
+
+                **allam-2-7b**
+                - Specialized model from Aleph Alpha
+                - 7 billion parameters
+                - Best for: Multilingual support, concise responses, and straightforward Q&A
+                - Good for international users and simpler applications
+
+                *Note: Larger models generally provide higher quality responses but may take slightly longer to generate.*
+                """)

+            gr.Markdown("Enter your Groq API key to start chatting with AI models.")
+
+            with gr.Row():
+                with gr.Column(scale=2):
+                    api_key_input = gr.Textbox(
+                        label="Groq API Key",
+                        placeholder="Enter your Groq API key (starts with gsk_)",
+                        type="password"
+                    )
+
+                with gr.Column(scale=1):
+                    test_button = gr.Button("Test API Connection")
+                    api_status = gr.Textbox(label="API Status", interactive=False)
+
+            with gr.Row():
+                with gr.Column(scale=2):
+                    model_dropdown = gr.Dropdown(
+                        choices=models,
+                        label="Select Model",
+                        value="llama3-70b-8192"
+                    )
+                with gr.Column(scale=1):
+                    template_dropdown = gr.Dropdown(
+                        choices=templates,
+                        label="Select Template",
+                        value=""
+                    )
+
+            with gr.Row():
+                with gr.Column():
+                    with gr.Accordion("Advanced Settings", open=False):
+                        temperature_slider = gr.Slider(
+                            minimum=0.0, maximum=1.0, value=0.7, step=0.01,
+                            label="Temperature (higher = more creative, lower = more focused)"
+                        )
+                        max_tokens_slider = gr.Slider(
+                            minimum=256, maximum=8192, value=4096, step=256,
+                            label="Max Tokens (maximum length of response)"
+                        )
+                        top_p_slider = gr.Slider(
+                            minimum=0.0, maximum=1.0, value=0.95, step=0.01,
+                            label="Top P (nucleus sampling probability threshold)"
+                        )
+
+            chatbot = gr.Chatbot(label="Conversation", height=500)
+
+            with gr.Row():
+                message_input = gr.Textbox(
+                    label="Your Message",
+                    placeholder="Type your message here...",
+                    lines=3
                )
+
+            with gr.Row():
+                submit_button = gr.Button("Send", variant="primary")
+                clear_button = gr.Button("Clear Conversation")
+
+        # Analytics Dashboard Tab
+        with gr.Tab("Analytics Dashboard"):
+            with gr.Column():
+                gr.Markdown("# Usage Analytics Dashboard")
+                refresh_analytics_button = gr.Button("Refresh Analytics")
+
+                analytics_summary = gr.Markdown()
+
+                with gr.Row():
+                    with gr.Column():
+                        model_usage_chart = gr.Image(label="Token Usage by Model")
+                    with gr.Column():
+                        daily_usage_chart = gr.Image(label="Daily Token Usage")
+
+                response_time_chart = gr.Image(label="Response Time by Model")
+
+                with gr.Accordion("Raw Data", open=False):
+                    analytics_table = gr.DataFrame(label="Raw Analytics Data")

    # Connect components with functions
    submit_button.click(
+        fn=enhanced_chat_with_groq,
        inputs=[
            api_key_input,
            model_dropdown,
@@ -192,7 +386,8 @@ with gr.Blocks(title="Groq AI Chat Playground") as app:
            temperature_slider,
            max_tokens_slider,
            top_p_slider,
+            chatbot,
+            template_dropdown
        ],
        outputs=chatbot
    ).then(
@@ -202,7 +397,7 @@ with gr.Blocks(title="Groq AI Chat Playground") as app:
    )

    message_input.submit(
+        fn=enhanced_chat_with_groq,
        inputs=[
            api_key_input,
            model_dropdown,
@@ -210,7 +405,8 @@ with gr.Blocks(title="Groq AI Chat Playground") as app:
            temperature_slider,
            max_tokens_slider,
            top_p_slider,
+            chatbot,
+            template_dropdown
        ],
        outputs=chatbot
    ).then(
@@ -230,6 +426,12 @@ with gr.Blocks(title="Groq AI Chat Playground") as app:
        inputs=[api_key_input],
        outputs=[api_status]
    )
+
+    refresh_analytics_button.click(
+        fn=generate_analytics,
+        inputs=[],
+        outputs=[analytics_summary, model_usage_chart, daily_usage_chart, response_time_chart, analytics_table]
+    )

# Launch the app
if __name__ == "__main__":
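Outside the app, the same log the dashboard reads can be inspected directly; a minimal sketch, assuming analytics/chat_log.json has already been written by the logging code above:

    import json
    import pandas as pd

    # Load the list of entries written by log_chat_interaction
    with open("analytics/chat_log.json") as f:
        df = pd.DataFrame(json.load(f))

    # Same aggregations the Analytics Dashboard charts are built from
    print(df.groupby("model")["tokens_used"].sum())         # total tokens per model
    print(df.groupby("model")["response_time_sec"].mean())  # average latency per model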