SnehaLeela commited on
Commit
3bd3796
·
verified ·
1 Parent(s): 079c35d

Update app.py

Browse files

theme and user questions

Files changed (1) hide show
  1. app.py +63 -16
app.py CHANGED
@@ -148,25 +148,72 @@ class Me:
148
 
149
    # Main chat function
    def chat(self, message, history):
        """Gradio chat handler (pre-change version).

        Assumes *history* is already a list of OpenAI-style role dicts —
        presumably because the interface is launched with
        ChatInterface(type="messages"); verify against the launch call.
        Repeats the tool-calling round trip until the model stops
        requesting tools, then returns the final reply text.
        """
        # Full conversation: system prompt + prior turns + the new user turn.
        messages = [{"role": "system", "content": self.system_prompt()}] + history + [{"role": "user", "content": message}]
        done = False
        while not done:
            # Each round the model may either answer or request tool calls.
            response = self.openai.chat.completions.create(
                model="gemini-2.5-flash-preview-05-20",
                messages=messages,
                tools=tools
            )
            if response.choices[0].finish_reason == "tool_calls":
                # NOTE(review): rebinding `message` shadows the user's text;
                # safe only because the original text is not used again below.
                message = response.choices[0].message
                tool_calls = message.tool_calls
                results = self.handle_tool_call(tool_calls)
                # Feed the tool request and its outputs back for another round.
                messages.append(message)
                messages.extend(results)
            else:
                done = True
        return response.choices[0].message.content
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
168
 
169
# Launch Gradio interface
# NOTE: type="messages" makes Gradio pass history as role-dict messages,
# matching what chat() concatenates directly; share=True exposes a public
# Gradio link.
if __name__ == "__main__":
    me = Me()
    gr.ChatInterface(me.chat, type="messages").launch(share=True)
 
 
 
 
 
 
 
148
 
149
  # Main chat function
150
  def chat(self, message, history):
151
+ # 1. Correctly format the messages list from Gradio's history tuples.
152
+ messages = [{"role": "system", "content": self.system_prompt()}]
153
+ for user_msg, assistant_msg in history:
154
+ messages.append({"role": "user", "content": user_msg})
155
+ messages.append({"role": "assistant", "content": assistant_msg})
156
+ messages.append({"role": "user", "content": message})
157
+
158
+ # 2. First API call: The model decides if it needs a tool.
159
+ response = self.openai.chat.completions.create(
160
+ model="gemini-2.5-flash-preview-05-20",
161
+ messages=messages,
162
+ tools=tools
163
+ )
164
+
165
+ # 3. Check for a tool call.
166
+ if response.choices[0].finish_reason == "tool_calls":
167
+ message_with_tool_calls = response.choices[0].message
168
+ tool_calls = message_with_tool_calls.tool_calls
169
+
170
+ # 4. Execute the tool and get the result.
171
+ results = self.handle_tool_call(tool_calls)
172
+
173
+ # 5. Append the tool call and its output to the messages list.
174
+ messages.append(message_with_tool_calls)
175
+ messages.extend(results)
176
+
177
+ # 6. Second API call: Get the final, conversational response.
178
+ final_response = self.openai.chat.completions.create(
179
  model="gemini-2.5-flash-preview-05-20",
180
+ messages=messages
 
181
  )
182
+ return final_response.choices[0].message.content
183
+ else:
184
+ # If no tool call, return the direct response.
185
+ return response.choices[0].message.content
186
+
187
+
188
# Custom CSS with your local image
# NOTE(review): the bare `div` selector applies the background image to EVERY
# div on the page, not just the app container — presumably the intent was the
# page background only; verify the rendered result before enabling.
# NOTE(review): "file/Gemini_Generated.png" is only served if Gradio is allowed
# to expose that path — TODO confirm it resolves (css is currently commented
# out at launch, so this block is unused).
css_code = """
div {
background-image: url("file/Gemini_Generated.png"); /* Your local image */
background-size: cover;
background-position: center;
background-repeat: no-repeat;
}

.gradio-container {
background-color: rgba(255, 255, 255, 0.6); /* Optional overlay for readability */
}

.chat-message.user {
background-color: rgba(208, 230, 255, 0.8);
}

.chat-message.bot {
background-color: rgba(224, 255, 224, 0.8);
}
"""
209
 
210
# Launch Gradio interface.
# Tuple-format history (no type="messages") is what chat() expects; the custom
# css remains disabled. share=True exposes a public Gradio link.
if __name__ == "__main__":
    me = Me()
    chat_ui = gr.ChatInterface(
        me.chat,
        theme="NoCrypt/miku",
        title="SnehaLeela's Careerbot",
    )
    chat_ui.launch(share=True)