shukdevdatta123 committed
Commit de6ef81 · verified · 1 Parent(s): 6aa4b02

Update app.py

Files changed (1)
  app.py +22 -77
app.py CHANGED
@@ -147,6 +147,13 @@ def clear_conversation():
     """Clear the conversation history."""
     return []
 
+def clear_analytics():
+    """Clear the analytics data."""
+    log_file = "analytics/chat_log.json"
+    if os.path.exists(log_file):
+        os.remove(log_file)
+    return "Analytics data has been cleared."
+
 def plt_to_html(fig):
     """Convert matplotlib figure to HTML img tag"""
     buf = io.BytesIO()
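
Note on the new handler: clear_analytics returns a single string, while the event wiring added at the end of this diff routes it to four outputs (analytics_summary, model_usage_chart, response_time_chart, analytics_table). Gradio normally expects one return value per output component, so a variant along the following lines would reset each dashboard element explicitly. This is a hedged sketch, not the committed code:

import os

def clear_analytics():
    """Hypothetical variant: clear the analytics log and reset all four dashboard outputs."""
    log_file = "analytics/chat_log.json"
    if os.path.exists(log_file):
        os.remove(log_file)
    # One value per wired output: summary text, two empty charts, empty table
    return "Analytics data has been cleared.", None, None, []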
@@ -161,14 +168,14 @@ def generate_analytics():
     log_file = "analytics/chat_log.json"
 
     if not os.path.exists(log_file):
-        return "No analytics data available yet.", None, None, None, []
+        return "No analytics data available yet.", None, None, []
 
     try:
         with open(log_file, "r") as f:
             logs = json.load(f)
 
         if not logs:
-            return "No analytics data available yet.", None, None, None, []
+            return "No analytics data available yet.", None, None, []
 
         # Convert to DataFrame
         df = pd.DataFrame(logs)
@@ -190,21 +197,6 @@ def generate_analytics():
         plt.tight_layout()
         model_usage_img = plt_to_html(fig1)
 
-        # Generate usage over time chart
-        df["date"] = df["timestamp"].dt.date
-        daily_usage = df.groupby("date").agg({
-            "tokens_used": "sum"
-        }).reset_index()
-
-        fig2 = plt.figure(figsize=(10, 6))
-        plt.plot(daily_usage["date"], daily_usage["tokens_used"], marker="o")
-        plt.title("Daily Token Usage")
-        plt.xlabel("Date")
-        plt.ylabel("Tokens Used")
-        plt.grid(True)
-        plt.tight_layout()
-        daily_usage_img = plt_to_html(fig2)
-
         # Generate response time chart
         model_response_time = df.groupby("model").agg({
             "response_time_sec": "mean"
@@ -240,11 +232,11 @@ def generate_analytics():
         - **Date Range**: {df["timestamp"].min().date()} to {df["timestamp"].max().date()}
         """
 
-        return summary, model_usage_img, daily_usage_img, response_time_img, df.to_dict("records")
+        return summary, model_usage_img, response_time_img, df.to_dict("records")
 
     except Exception as e:
         error_message = f"Error generating analytics: {str(e)}"
-        return error_message, None, None, None, []
+        return error_message, None, None, []
 
 # Define available models
 models = [
@@ -267,41 +259,7 @@ with gr.Blocks(title="Groq AI Chat Playground") as app:
     with gr.Tab("Chat"):
         # New model information accordion
         with gr.Accordion("ℹ️ Model Information - Learn about available models", open=False):
-            gr.Markdown("""
-            ### Available Models and Use Cases
-
-            **llama3-70b-8192**
-            - Meta's most powerful language model
-            - 70 billion parameters with 8192 token context window
-            - Best for: Complex reasoning, sophisticated content generation, creative writing, and detailed analysis
-            - Optimal for users needing the highest quality AI responses
-
-            **llama3-8b-8192**
-            - Lighter version of Llama 3
-            - 8 billion parameters with 8192 token context window
-            - Best for: Faster responses, everyday tasks, simpler queries
-            - Good balance between performance and speed
-
-            **mistral-saba-24b**
-            - Mistral AI's advanced model
-            - 24 billion parameters
-            - Best for: High-quality reasoning, code generation, and structured outputs
-            - Excellent for technical and professional use cases
-
-            **gemma2-9b-it**
-            - Google's instruction-tuned model
-            - 9 billion parameters
-            - Best for: Following specific instructions, educational content, and general knowledge queries
-            - Well-rounded performance for various tasks
-
-            **allam-2-7b**
-            - Specialized model from Aleph Alpha
-            - 7 billion parameters
-            - Best for: Multilingual support, concise responses, and straightforward Q&A
-            - Good for international users and simpler applications
-
-            *Note: Larger models generally provide higher quality responses but may take slightly longer to generate.*
-            """)
+            gr.Markdown(""" ### Available Models and Use Cases...""")
 
         gr.Markdown("Enter your Groq API key to start chatting with AI models.")
 
@@ -365,14 +323,13 @@ with gr.Blocks(title="Groq AI Chat Playground") as app:
        with gr.Column():
            gr.Markdown("# Usage Analytics Dashboard")
            refresh_analytics_button = gr.Button("Refresh Analytics")
+           clear_analytics_button = gr.Button("Clear Analytics Data")
 
            analytics_summary = gr.Markdown()
 
            with gr.Row():
                with gr.Column():
                    model_usage_chart = gr.HTML(label="Token Usage by Model")
-               with gr.Column():
-                   daily_usage_chart = gr.HTML(label="Daily Token Usage")
 
            response_time_chart = gr.HTML(label="Response Time by Model")
 
@@ -382,16 +339,7 @@ with gr.Blocks(title="Groq AI Chat Playground") as app:
     # Connect components with functions
     submit_button.click(
         fn=enhanced_chat_with_groq,
-        inputs=[
-            api_key_input,
-            model_dropdown,
-            message_input,
-            temperature_slider,
-            max_tokens_slider,
-            top_p_slider,
-            chatbot,
-            template_dropdown
-        ],
+        inputs=[api_key_input, model_dropdown, message_input, temperature_slider, max_tokens_slider, top_p_slider, chatbot, template_dropdown],
         outputs=chatbot
     ).then(
         fn=lambda: "",
@@ -401,16 +349,7 @@ with gr.Blocks(title="Groq AI Chat Playground") as app:
 
     message_input.submit(
         fn=enhanced_chat_with_groq,
-        inputs=[
-            api_key_input,
-            model_dropdown,
-            message_input,
-            temperature_slider,
-            max_tokens_slider,
-            top_p_slider,
-            chatbot,
-            template_dropdown
-        ],
+        inputs=[api_key_input, model_dropdown, message_input, temperature_slider, max_tokens_slider, top_p_slider, chatbot, template_dropdown],
         outputs=chatbot
     ).then(
         fn=lambda: "",
@@ -433,7 +372,13 @@ with gr.Blocks(title="Groq AI Chat Playground") as app:
     refresh_analytics_button.click(
         fn=generate_analytics,
         inputs=[],
-        outputs=[analytics_summary, model_usage_chart, daily_usage_chart, response_time_chart, analytics_table]
+        outputs=[analytics_summary, model_usage_chart, response_time_chart, analytics_table]
+    )
+
+    clear_analytics_button.click(
+        fn=clear_analytics,
+        inputs=[],
+        outputs=[analytics_summary, model_usage_chart, response_time_chart, analytics_table]
     )
 
     # Launch the app
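
Possible follow-up (not part of this commit): because clear_analytics_button is wired to the same four outputs as the refresh button, the clear action could chain a dashboard refresh with Gradio's .then(), mirroring the submit_button.click(...).then(...) pattern already used above. The snippet assumes the components defined in app.py and would sit inside the gr.Blocks context:

    # Inside the `with gr.Blocks(...) as app:` context, after clear_analytics_button is defined
    clear_analytics_button.click(
        fn=clear_analytics,
        inputs=[],
        outputs=[analytics_summary, model_usage_chart, response_time_chart, analytics_table]
    ).then(
        fn=generate_analytics,  # redraw the dashboard from the now-empty log
        inputs=[],
        outputs=[analytics_summary, model_usage_chart, response_time_chart, analytics_table]
    )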
 