shukdevdatta123 committed
Commit c36db17 · verified · 1 Parent(s): 9117085

Update app.py

Files changed (1)
  1. app.py +44 -86
app.py CHANGED
@@ -11,6 +11,7 @@ import json
 
 def validate_api_key(api_key):
     """Validate if the API key has the correct format."""
+    # Basic format check for Groq API keys (they typically start with 'gsk_')
     if not api_key.strip():
         return False, "API key cannot be empty"
 
@@ -23,6 +24,7 @@ def test_api_connection(api_key):
     """Test the API connection with a minimal request."""
     try:
         client = Groq(api_key=api_key)
+        # Making a minimal API call to test the connection
         client.chat.completions.create(
             model="llama3-70b-8192",
             messages=[{"role": "user", "content": "test"}],
@@ -30,11 +32,13 @@ def test_api_connection(api_key):
         )
         return True, "API connection successful"
     except Exception as e:
+        # Handle all exceptions since Groq might not expose specific error types
         if "authentication" in str(e).lower() or "api key" in str(e).lower():
             return False, "Authentication failed: Invalid API key"
         else:
             return False, f"Error connecting to Groq API: {str(e)}"
 
+# Ensure analytics directory exists
 os.makedirs("analytics", exist_ok=True)
 
 def log_chat_interaction(model, tokens_used, response_time, user_message_length):
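The two helpers above can be exercised outside the UI. A minimal pre-flight sketch, assuming app.py is importable from the working directory and using a placeholder key (real Groq keys typically start with "gsk_", per the comment added above):

from app import validate_api_key, test_api_connection

api_key = "gsk_your_key_here"  # placeholder value, not a real key

ok, msg = validate_api_key(api_key)
if ok:
    # Only hit the network once the format check passes
    ok, msg = test_api_connection(api_key)
print(msg)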
@@ -51,6 +55,7 @@ def log_chat_interaction(model, tokens_used, response_time, user_message_length)
         "user_message_length": user_message_length
     }
 
+    # Append to existing log or create new file
     if os.path.exists(log_file):
         try:
             with open(log_file, "r") as f:
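The new comment ("Append to existing log or create new file") names the read-modify-write pattern around the JSON log, but the hunk cuts off inside the read. A self-contained sketch of that pattern, with a hypothetical helper name (app.py does this inline inside log_chat_interaction):

import json
import os

def append_log_entry(log_file, log_entry):
    # Hypothetical helper illustrating the append-or-create pattern
    logs = []
    if os.path.exists(log_file):
        try:
            with open(log_file, "r") as f:
                logs = json.load(f)
        except (json.JSONDecodeError, OSError):
            logs = []  # fall back to a fresh log if the file is missing or unreadable
    logs.append(log_entry)
    with open(log_file, "w") as f:
        json.dump(logs, f, indent=2)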
@@ -81,8 +86,10 @@ def enhanced_chat_with_groq(api_key, model, user_message, temperature, max_token
     """Enhanced chat function with analytics logging"""
     start_time = datetime.now()
 
+    # Get system prompt if template is provided
     system_prompt = get_template_prompt(template_name) if template_name else ""
 
+    # Validate and process as before
     is_valid, message = validate_api_key(api_key)
     if not is_valid:
         return chat_history + [[user_message, f"Error: {message}"]]
@@ -92,6 +99,7 @@ def enhanced_chat_with_groq(api_key, model, user_message, temperature, max_token
         return chat_history + [[user_message, f"Error: {connection_message}"]]
 
     try:
+        # Format history
         messages = []
 
         if system_prompt:
@@ -103,6 +111,7 @@ def enhanced_chat_with_groq(api_key, model, user_message, temperature, max_token
 
         messages.append({"role": "user", "content": user_message})
 
+        # Make API call
         client = Groq(api_key=api_key)
         response = client.chat.completions.create(
             model=model,
@@ -112,10 +121,12 @@ def enhanced_chat_with_groq(api_key, model, user_message, temperature, max_token
             top_p=top_p
         )
 
+        # Calculate metrics
         end_time = datetime.now()
         response_time = (end_time - start_time).total_seconds()
         tokens_used = response.usage.total_tokens
 
+        # Log the interaction
         log_chat_interaction(
             model=model,
             tokens_used=tokens_used,
@@ -123,6 +134,7 @@ def enhanced_chat_with_groq(api_key, model, user_message, temperature, max_token
             user_message_length=len(user_message)
         )
 
+        # Extract response
         assistant_response = response.choices[0].message.content
 
         return chat_history + [[user_message, assistant_response]]
@@ -135,6 +147,13 @@ def clear_conversation():
     """Clear the conversation history."""
     return []
 
+def clear_analytics():
+    """Clear the analytics data."""
+    log_file = "analytics/chat_log.json"
+    if os.path.exists(log_file):
+        os.remove(log_file)
+    return "Analytics data has been cleared."
+
 def plt_to_html(fig):
     """Convert matplotlib figure to HTML img tag"""
     buf = io.BytesIO()
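The relocated clear_analytics() now returns a single status string instead of the removed version's four-element tuple, which suggests it feeds exactly one output component. A hypothetical wiring sketch (the button and output names here are stand-ins; the actual event wiring sits outside this hunk):

import gradio as gr

from app import clear_analytics  # assumes app.py is importable

with gr.Blocks() as demo:
    analytics_summary = gr.Markdown()
    clear_button = gr.Button("Clear Analytics Data")  # stand-in component name
    clear_button.click(
        fn=clear_analytics,         # returns one status string
        outputs=analytics_summary   # so a single output component is enough
    )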
@@ -149,18 +168,20 @@ def generate_analytics():
     log_file = "analytics/chat_log.json"
 
     if not os.path.exists(log_file):
-        return "No analytics data available yet.", None, None, None, []
+        return "No analytics data available yet.", None, None, []
 
     try:
         with open(log_file, "r") as f:
             logs = json.load(f)
 
         if not logs:
-            return "No analytics data available yet.", None, None, None, []
+            return "No analytics data available yet.", None, None, []
 
+        # Convert to DataFrame
         df = pd.DataFrame(logs)
         df["timestamp"] = pd.to_datetime(df["timestamp"])
 
+        # Generate usage by model chart
         model_usage = df.groupby("model").agg({
             "tokens_used": "sum",
             "timestamp": "count"
@@ -176,20 +197,7 @@ def generate_analytics():
         plt.tight_layout()
         model_usage_img = plt_to_html(fig1)
 
-        df["date"] = df["timestamp"].dt.date
-        daily_usage = df.groupby("date").agg({
-            "tokens_used": "sum"
-        }).reset_index()
-
-        fig2 = plt.figure(figsize=(10, 6))
-        plt.plot(daily_usage["date"], daily_usage["tokens_used"], marker="o")
-        plt.title("Daily Token Usage")
-        plt.xlabel("Date")
-        plt.ylabel("Tokens Used")
-        plt.grid(True)
-        plt.tight_layout()
-        daily_usage_img = plt_to_html(fig2)
-
+        # Generate response time chart
         model_response_time = df.groupby("model").agg({
             "response_time_sec": "mean"
         }).reset_index()
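With the daily-usage chart (fig2) dropped, generate_analytics() now returns four values rather than five, matching the four dashboard outputs wired near the end of this diff (analytics_summary, model_usage_chart, response_time_chart, analytics_table). A small sketch of the new contract, assuming app.py is importable:

from app import generate_analytics

# The 4-tuple maps positionally onto the four dashboard components
summary_md, model_usage_html, response_time_html, raw_records = generate_analytics()
print(summary_md)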
@@ -203,10 +211,12 @@ def generate_analytics():
         plt.tight_layout()
         response_time_img = plt_to_html(fig3)
 
+        # Summary statistics
         total_tokens = df["tokens_used"].sum()
         total_requests = len(df)
         avg_response_time = df["response_time_sec"].mean()
 
+        # Handling the case where there might not be enough data
         if not model_usage.empty:
             most_used_model = model_usage.iloc[model_usage["request_count"].argmax()]["model"]
         else:
@@ -214,7 +224,7 @@ def generate_analytics():
 
         summary = f"""
         ## Analytics Summary
-
+
         - **Total API Requests**: {total_requests}
         - **Total Tokens Used**: {total_tokens:,}
         - **Average Response Time**: {avg_response_time:.2f} seconds
@@ -222,21 +232,13 @@ def generate_analytics():
         - **Date Range**: {df["timestamp"].min().date()} to {df["timestamp"].max().date()}
         """
 
-        return summary, model_usage_img, daily_usage_img, response_time_img, df.to_dict("records")
+        return summary, model_usage_img, response_time_img, df.to_dict("records")
 
     except Exception as e:
         error_message = f"Error generating analytics: {str(e)}"
-        return error_message, None, None, None, []
-
-def clear_analytics():
-    """Clear the analytics data"""
-    log_file = "analytics/chat_log.json"
-
-    if os.path.exists(log_file):
-        os.remove(log_file)
-
-    return "Analytics data has been cleared.", None, None, None
+        return error_message, None, None, []
 
+# Define available models
 models = [
     "llama3-70b-8192",
     "llama3-8b-8192",
@@ -245,49 +247,19 @@ models = [
     "allam-2-7b"
 ]
 
+# Define templates
 templates = ["General Assistant", "Code Helper", "Creative Writer", "Technical Expert", "Data Analyst"]
 
+# Create the Gradio interface
 with gr.Blocks(title="Groq AI Chat Playground") as app:
     gr.Markdown("# Groq AI Chat Playground")
 
+    # Create tabs for Chat and Analytics
     with gr.Tabs():
         with gr.Tab("Chat"):
+            # New model information accordion
            with gr.Accordion("ℹ️ Model Information - Learn about available models", open=False):
-                gr.Markdown("""
-                ### Available Models and Use Cases
-
-                **llama3-70b-8192**
-                - Meta's most powerful language model
-                - 70 billion parameters with 8192 token context window
-                - Best for: Complex reasoning, sophisticated content generation, creative writing, and detailed analysis
-                - Optimal for users needing the highest quality AI responses
-
-                **llama3-8b-8192**
-                - Lighter version of Llama 3
-                - 8 billion parameters with 8192 token context window
-                - Best for: Faster responses, everyday tasks, simpler queries
-                - Good balance between performance and speed
-
-                **mistral-saba-24b**
-                - Mistral AI's advanced model
-                - 24 billion parameters
-                - Best for: High-quality reasoning, code generation, and structured outputs
-                - Excellent for technical and professional use cases
-
-                **gemma2-9b-it**
-                - Google's instruction-tuned model
-                - 9 billion parameters
-                - Best for: Following specific instructions, educational content, and general knowledge queries
-                - Well-rounded performance for various tasks
-
-                **allam-2-7b**
-                - Specialized model from Aleph Alpha
-                - 7 billion parameters
-                - Best for: Multilingual support, concise responses, and straightforward Q&A
-                - Good for international users and simpler applications
-
-                *Note: Larger models generally provide higher quality responses but may take slightly longer to generate.*
-                """)
+                gr.Markdown(""" ### Available Models and Use Cases...""")
 
             gr.Markdown("Enter your Groq API key to start chatting with AI models.")
 
@@ -346,6 +318,7 @@ with gr.Blocks(title="Groq AI Chat Playground") as app:
                     submit_button = gr.Button("Send", variant="primary")
                     clear_button = gr.Button("Clear Conversation")
 
+        # Analytics Dashboard Tab
         with gr.Tab("Analytics Dashboard"):
             with gr.Column():
                 gr.Markdown("# Usage Analytics Dashboard")
@@ -357,23 +330,16 @@ with gr.Blocks(title="Groq AI Chat Playground") as app:
                 with gr.Row():
                     with gr.Column():
                         model_usage_chart = gr.HTML(label="Token Usage by Model")
-                    with gr.Column():
-                        response_time_chart = gr.HTML(label="Response Time by Model")
 
-                analytics_table = gr.DataFrame(label="Raw Analytics Data")
+                        response_time_chart = gr.HTML(label="Response Time by Model")
+
+                with gr.Accordion("Raw Data", open=False):
+                    analytics_table = gr.DataFrame(label="Raw Analytics Data")
 
+    # Connect components with functions
     submit_button.click(
         fn=enhanced_chat_with_groq,
-        inputs=[
-            api_key_input,
-            model_dropdown,
-            message_input,
-            temperature_slider,
-            max_tokens_slider,
-            top_p_slider,
-            chatbot,
-            template_dropdown
-        ],
+        inputs=[api_key_input, model_dropdown, message_input, temperature_slider, max_tokens_slider, top_p_slider, chatbot, template_dropdown],
         outputs=chatbot
     ).then(
         fn=lambda: "",
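The long inputs list is collapsed to a single line, and the click handler is chained with .then() to clear the message box after the reply arrives. A minimal sketch of that pattern with stand-in components; the .then() output target is an assumption, since the hunk cuts off before it:

import gradio as gr

with gr.Blocks() as demo:
    box = gr.Textbox(label="Message")
    chat = gr.Chatbot()
    send = gr.Button("Send")
    send.click(
        fn=lambda message, history: history + [[message, "(reply)"]],  # stand-in chat function
        inputs=[box, chat],
        outputs=chat,
    ).then(
        fn=lambda: "",  # returning an empty string
        outputs=box,    # clears the textbox once the reply is in
    )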
@@ -383,16 +349,7 @@ with gr.Blocks(title="Groq AI Chat Playground") as app:
 
     message_input.submit(
         fn=enhanced_chat_with_groq,
-        inputs=[
-            api_key_input,
-            model_dropdown,
-            message_input,
-            temperature_slider,
-            max_tokens_slider,
-            top_p_slider,
-            chatbot,
-            template_dropdown
-        ],
+        inputs=[api_key_input, model_dropdown, message_input, temperature_slider, max_tokens_slider, top_p_slider, chatbot, template_dropdown],
         outputs=chatbot
     ).then(
         fn=lambda: "",
@@ -424,5 +381,6 @@ with gr.Blocks(title="Groq AI Chat Playground") as app:
         outputs=[analytics_summary, model_usage_chart, response_time_chart, analytics_table]
     )
 
+# Launch the app
 if __name__ == "__main__":
     app.launch(share=False)
 