shukdevdatta123 committed on
Commit a617169 · verified · 1 Parent(s): 5eb0488

Create version1.txt

Files changed (1):
  1. version1.txt  +236 -0
version1.txt ADDED
@@ -0,0 +1,236 @@
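# Gradio chat playground for the Groq API: validates the user's API key format,
# tests connectivity with a minimal request, and sends each conversation turn to
# the selected Groq model.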
import gradio as gr
import os
import re
from groq import Groq

def validate_api_key(api_key):
    """Validate if the API key has the correct format."""
    # Basic format check for Groq API keys (they typically start with 'gsk_')
    if not api_key.strip():
        return False, "API key cannot be empty"

    if not api_key.startswith("gsk_"):
        return False, "Invalid API key format. Groq API keys typically start with 'gsk_'"

    return True, "API key looks valid"

def test_api_connection(api_key):
    """Test the API connection with a minimal request."""
    try:
        client = Groq(api_key=api_key)
        # Making a minimal API call to test the connection
        client.chat.completions.create(
            model="llama3-70b-8192",
            messages=[{"role": "user", "content": "test"}],
            max_tokens=5
        )
        return True, "API connection successful"
    except Exception as e:
        # Handle all exceptions since Groq might not expose specific error types
        if "authentication" in str(e).lower() or "api key" in str(e).lower():
            return False, "Authentication failed: Invalid API key"
        else:
            return False, f"Error connecting to Groq API: {str(e)}"

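# chat_with_groq below receives chat_history in gr.Chatbot's pair format
# (a list of [user, assistant] lists) and flattens it into the role/content
# message list expected by the Groq chat.completions API.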
def chat_with_groq(api_key, model, user_message, temperature, max_tokens, top_p, chat_history):
    """
    Interact with the Groq API to get a response.
    """
    # Validate API key
    is_valid, message = validate_api_key(api_key)
    if not is_valid:
        return chat_history + [[user_message, f"Error: {message}"]]

    # Test API connection
    connection_valid, connection_message = test_api_connection(api_key)
    if not connection_valid:
        return chat_history + [[user_message, f"Error: {connection_message}"]]

    try:
        # Format history for the API
        messages = []
        for human, assistant in chat_history:
            messages.append({"role": "user", "content": human})
            messages.append({"role": "assistant", "content": assistant})

        # Add the current message
        messages.append({"role": "user", "content": user_message})

        # Create the client and make the API call
        client = Groq(api_key=api_key)
        response = client.chat.completions.create(
            model=model,
            messages=messages,
            temperature=temperature,
            max_tokens=max_tokens,
            top_p=top_p
        )

        # Extract the response text
        assistant_response = response.choices[0].message.content

        # Return updated chat history
        return chat_history + [[user_message, assistant_response]]

    except Exception as e:
        error_message = f"Error: {str(e)}"
        return chat_history + [[user_message, error_message]]

def clear_conversation():
    """Clear the conversation history."""
    return []

# Define available models
models = [
    "llama3-70b-8192",
    "llama3-8b-8192",
    "mistral-saba-24b",
    "gemma2-9b-it",
    "allam-2-7b"
]

# Create the Gradio interface
with gr.Blocks(title="Groq AI Chat Playground") as app:
    gr.Markdown("# Groq AI Chat Playground")

    # New model information accordion
    with gr.Accordion("ℹ️ Model Information - Learn about available models", open=False):
        gr.Markdown("""
### Available Models and Use Cases

**llama3-70b-8192**
- Meta's most powerful language model
- 70 billion parameters with 8192 token context window
- Best for: Complex reasoning, sophisticated content generation, creative writing, and detailed analysis
- Optimal for users needing the highest quality AI responses

**llama3-8b-8192**
- Lighter version of Llama 3
- 8 billion parameters with 8192 token context window
- Best for: Faster responses, everyday tasks, simpler queries
- Good balance between performance and speed

**mistral-saba-24b**
- Mistral AI's advanced model
- 24 billion parameters
- Best for: High-quality reasoning, code generation, and structured outputs
- Excellent for technical and professional use cases

**gemma2-9b-it**
- Google's instruction-tuned model
- 9 billion parameters
- Best for: Following specific instructions, educational content, and general knowledge queries
- Well-rounded performance for various tasks

**allam-2-7b**
- Bilingual Arabic-English model developed by SDAIA (Saudi Data and AI Authority)
- 7 billion parameters
- Best for: Multilingual support, concise responses, and straightforward Q&A
- Good for international users and simpler applications

*Note: Larger models generally provide higher quality responses but may take slightly longer to generate.*
""")

    gr.Markdown("Enter your Groq API key to start chatting with AI models.")

    with gr.Row():
        with gr.Column(scale=2):
            api_key_input = gr.Textbox(
                label="Groq API Key",
                placeholder="Enter your Groq API key (starts with gsk_)",
                type="password"
            )

        with gr.Column(scale=1):
            test_button = gr.Button("Test API Connection")
            api_status = gr.Textbox(label="API Status", interactive=False)

    with gr.Row():
        with gr.Column():
            model_dropdown = gr.Dropdown(
                choices=models,
                label="Select Model",
                value="llama3-70b-8192"
            )

    with gr.Row():
        with gr.Column():
            with gr.Accordion("Advanced Settings", open=False):
                temperature_slider = gr.Slider(
                    minimum=0.0, maximum=1.0, value=0.7, step=0.01,
                    label="Temperature (higher = more creative, lower = more focused)"
                )
                max_tokens_slider = gr.Slider(
                    minimum=256, maximum=8192, value=4096, step=256,
                    label="Max Tokens (maximum length of response)"
                )
                top_p_slider = gr.Slider(
                    minimum=0.0, maximum=1.0, value=0.95, step=0.01,
                    label="Top P (nucleus sampling probability threshold)"
                )

    chatbot = gr.Chatbot(label="Conversation", height=500)

    with gr.Row():
        message_input = gr.Textbox(
            label="Your Message",
            placeholder="Type your message here...",
            lines=3
        )

    with gr.Row():
        submit_button = gr.Button("Send", variant="primary")
        clear_button = gr.Button("Clear Conversation")

    # Connect components with functions
    submit_button.click(
        fn=chat_with_groq,
        inputs=[
            api_key_input,
            model_dropdown,
            message_input,
            temperature_slider,
            max_tokens_slider,
            top_p_slider,
            chatbot
        ],
        outputs=chatbot
    ).then(
        fn=lambda: "",
        inputs=None,
        outputs=message_input
    )

    message_input.submit(
        fn=chat_with_groq,
        inputs=[
            api_key_input,
            model_dropdown,
            message_input,
            temperature_slider,
            max_tokens_slider,
            top_p_slider,
            chatbot
        ],
        outputs=chatbot
    ).then(
        fn=lambda: "",
        inputs=None,
        outputs=message_input
    )

    clear_button.click(
        fn=clear_conversation,
        inputs=None,
        outputs=chatbot
    )

    # test_api_connection returns a (bool, message) tuple, but there is only one
    # output component, so pass just the message to the status textbox
    test_button.click(
        fn=lambda key: test_api_connection(key)[1],
        inputs=[api_key_input],
        outputs=[api_status]
    )

# Launch the app
if __name__ == "__main__":
    app.launch(share=False)
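
To run the app, install the gradio and groq packages and execute the file with Python; the API key is entered in the UI. For a quick check of the underlying API call outside Gradio, here is a minimal sketch that reuses the same chat.completions.create call as the app above; reading the key from a GROQ_API_KEY environment variable is an assumption, since the app itself takes the key from the password textbox:

import os
from groq import Groq

# Assumption: the key is exported as GROQ_API_KEY; the app above reads it from
# the password textbox instead.
client = Groq(api_key=os.environ["GROQ_API_KEY"])
response = client.chat.completions.create(
    model="llama3-8b-8192",
    messages=[{"role": "user", "content": "Say hello in one short sentence."}],
    max_tokens=32,
)
print(response.choices[0].message.content)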