SnehaLeela committed on
Commit
08a0e60
·
verified ·
1 Parent(s): 582d097

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -25
app.py CHANGED
@@ -6,13 +6,11 @@ from pypdf import PdfReader
6
  import gradio as gr
7
  import csv
8
 
9
- print("hi")
10
-
11
  # Load environment variables
12
  load_dotenv(override=True)
13
 
14
  GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/v1beta/openai/"
15
- google_api_key = os.getenv("GOOGLE_API_KEY")
16
  gemini = OpenAI(base_url=GEMINI_BASE_URL, api_key=google_api_key)
17
 
18
  # CSV files for logging
@@ -150,40 +148,36 @@ class Me:
150
 
151
  # Main chat function
152
  def chat(self, message, history):
153
- # 1. Correctly format the messages list from Gradio's history tuples.
154
- messages = [{"role": "system", "content": self.system_prompt()}]
155
- for user_msg, assistant_msg in history:
156
- messages.append({"role": "user", "content": user_msg})
157
- messages.append({"role": "assistant", "content": assistant_msg})
158
- messages.append({"role": "user", "content": message})
159
-
160
- # 2. First API call: The model decides if it needs a tool.
161
  response = self.openai.chat.completions.create(
162
  model="gemini-2.5-flash-preview-05-20",
163
  messages=messages,
164
  tools=tools
165
  )
166
-
167
- # 3. Check for a tool call.
168
  if response.choices[0].finish_reason == "tool_calls":
169
- message_with_tool_calls = response.choices[0].message
170
- tool_calls = message_with_tool_calls.tool_calls
171
 
172
- # 4. Execute the tool and get the result.
173
- results = self.handle_tool_call(tool_calls)
174
 
175
- # 5. Append the tool call and its output to the messages list.
176
- messages.append(message_with_tool_calls)
177
  messages.extend(results)
178
-
179
- # 6. Second API call: Get the final, conversational response.
180
- final_response = self.openai.chat.completions.create(
181
  model="gemini-2.5-flash-preview-05-20",
182
- messages=messages
183
  )
184
- return final_response.choices[0].message.content
185
  else:
186
- # If no tool call, return the direct response.
187
  return response.choices[0].message.content
188
 
189
 
 
6
  import gradio as gr
7
  import csv
8
 
 
 
9
  # Load environment variables
10
  load_dotenv(override=True)
11
 
12
  GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/v1beta/openai/"
13
# Read the Gemini API key from the environment only.
# SECURITY: never embed a real key as the getenv() fallback — the previous
# revision committed a live Google API key, which is now permanently visible
# in git history and must be revoked/rotated, not merely removed.
google_api_key = os.getenv("GOOGLE_API_KEY")
14
  gemini = OpenAI(base_url=GEMINI_BASE_URL, api_key=google_api_key)
15
 
16
  # CSV files for logging
 
148
 
149
  # Main chat function
150
  def chat(self, message, history):
151
+ messages = [{"role": "system", "content": self.system_prompt()}] + history + [{"role": "user", "content": message}]
152
+ done = False
153
+ #while not done:
154
+ # Send the first message
 
 
 
 
155
  response = self.openai.chat.completions.create(
156
  model="gemini-2.5-flash-preview-05-20",
157
  messages=messages,
158
  tools=tools
159
  )
160
+
161
+ # Check if the response contains tool calls
162
  if response.choices[0].finish_reason == "tool_calls":
163
+ message_to_add = response.choices[0].message
164
+ tool_calls_to_execute = message_to_add.tool_calls
165
 
166
+ # Execute the tool and get the result.
167
+ results = self.handle_tool_call(tool_calls_to_execute)
168
 
169
+ # Append the tool call and its output to the messages list.
170
+ messages.append(message_to_add)
171
  messages.extend(results)
172
+
173
+ # Now call the API again with the tool output
174
+ response = self.openai.chat.completions.create(
175
  model="gemini-2.5-flash-preview-05-20",
176
+ messages=messages,
177
  )
178
+ return response.choices[0].message.content
179
  else:
180
+ # If no tool call, return the direct response
181
  return response.choices[0].message.content
182
 
183