arithescientist committed
Commit d6689d0 · verified · 1 Parent(s): f0e4f1b

Update app.py

Files changed (1)
  1. app.py +41 -59
app.py CHANGED
@@ -93,17 +93,18 @@ few_shot_prompt = FewShotPromptTemplate(
     examples=few_shot_examples,
     prefix=system_prefix,
     suffix="Question: {input}\nSQL Query:",
-    input_variables=["input"]
+    input_variables=["input", "agent_scratchpad"]
 )
 
 # Step 5: Initialize the LLM and toolkit
 llm = ChatOpenAI(temperature=0, openai_api_key=openai_api_key)
 toolkit = SQLDatabaseToolkit(db=engine, llm=llm)
 
-# Step 6: Create the agent
+# Step 6: Create the agent using 'zero-shot-react-description' agent type
 agent_prompt = ChatPromptTemplate.from_messages([
     SystemMessagePromptTemplate(prompt=few_shot_prompt),
-    HumanMessagePromptTemplate.from_template("{input}")
+    HumanMessagePromptTemplate.from_template("{input}"),
+    MessagesPlaceholder(variable_name="agent_scratchpad")
 ])
 
 sql_agent = create_sql_agent(
@@ -111,7 +112,7 @@ sql_agent = create_sql_agent(
     toolkit=toolkit,
     prompt=agent_prompt,
     verbose=True,
-    agent_type="openai-functions",
+    agent_type="zero-shot-react-description",
     max_iterations=5
 )
 
@@ -128,65 +129,46 @@ def process_input():
         with st.spinner("Generating SQL query..."):
             response = sql_agent.run(user_prompt)
 
-            # Check if the response contains SQL code
-            if "SELECT" in response.upper():
-                sql_query = response.strip()
-                logging.info(f"Generated SQL Query: {sql_query}")
-
-                # Attempt to execute SQL query and handle exceptions
-                try:
-                    result = pd.read_sql_query(sql_query, conn)
-
-                    if result.empty:
-                        assistant_response = "The query returned no results. Please try a different question."
-                        st.session_state.history.append({"role": "assistant", "content": assistant_response})
-                    else:
-                        # Limit the result to first 10 rows for display
-                        result_display = result.head(10)
-                        st.session_state.history.append({"role": "assistant", "content": "Here are the results:"})
-                        st.session_state.history.append({"role": "assistant", "content": result_display})
-
-                        # Generate insights based on the query result
-                        insights_template = """
-                        You are an expert data analyst. Based on the user's question and the SQL query result provided below, generate a concise analysis that includes key data insights and actionable recommendations. Limit the response to a maximum of 150 words.
-
-                        User's Question: {question}
-
-                        SQL Query Result:
-                        {result}
-
-                        Concise Analysis:
-                        """
-                        insights_prompt = PromptTemplate(template=insights_template, input_variables=['question', 'result'])
-                        insights_chain = LLMChain(llm=llm, prompt=insights_prompt)
-
-                        result_str = result_display.to_string(index=False)
-                        insights = insights_chain.run({'question': user_prompt, 'result': result_str})
-
-                        # Append the assistant's insights to the history
-                        st.session_state.history.append({"role": "assistant", "content": insights})
-                except Exception as e:
-                    logging.error(f"An error occurred during SQL execution: {e}")
-                    assistant_response = f"Error executing SQL query: {e}"
-                    st.session_state.history.append({"role": "assistant", "content": assistant_response})
-            else:
-                # Handle responses that do not contain SQL queries
-                assistant_response = response
-                st.session_state.history.append({"role": "assistant", "content": assistant_response})
+            # Extract the SQL query from the agent's response
+            sql_query = response.strip()
+            logging.info(f"Generated SQL Query: {sql_query}")
 
-            # Evaluate the response for harmful content
+            # Attempt to execute SQL query and handle exceptions
             try:
-                evaluator = load_evaluator("harmful_content", llm=llm)
-                eval_result = evaluator.evaluate_strings(
-                    input=user_prompt,
-                    prediction=response
-                )
-                if eval_result['flagged']:
-                    st.warning("The assistant's response may not be appropriate.")
+                result = pd.read_sql_query(sql_query, conn)
+
+                if result.empty:
+                    assistant_response = "The query returned no results. Please try a different question."
+                    st.session_state.history.append({"role": "assistant", "content": assistant_response})
                 else:
-                    logging.info("Response evaluated as appropriate.")
+                    # Limit the result to first 10 rows for display
+                    result_display = result.head(10)
+                    st.session_state.history.append({"role": "assistant", "content": "Here are the results:"})
+                    st.session_state.history.append({"role": "assistant", "content": result_display})
+
+                    # Generate insights based on the query result
+                    insights_template = """
+                    You are an expert data analyst. Based on the user's question and the SQL query result provided below, generate a concise analysis that includes key data insights and actionable recommendations. Limit the response to a maximum of 150 words.
+
+                    User's Question: {question}
+
+                    SQL Query Result:
+                    {result}
+
+                    Concise Analysis:
+                    """
+                    insights_prompt = PromptTemplate(template=insights_template, input_variables=['question', 'result'])
+                    insights_chain = LLMChain(llm=llm, prompt=insights_prompt)
+
+                    result_str = result_display.to_string(index=False)
+                    insights = insights_chain.run({'question': user_prompt, 'result': result_str})
+
+                    # Append the assistant's insights to the history
+                    st.session_state.history.append({"role": "assistant", "content": insights})
             except Exception as e:
-                logging.error(f"An error occurred during evaluation: {e}")
+                logging.error(f"An error occurred during SQL execution: {e}")
+                assistant_response = f"Error executing SQL query: {e}"
+                st.session_state.history.append({"role": "assistant", "content": assistant_response})
     except Exception as e:
         logging.error(f"An error occurred: {e}")
         assistant_response = f"Error: {e}"
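
For readers unfamiliar with the prompt plumbing used in this diff, below is a minimal, self-contained sketch (separate from app.py; the prompt text and messages are illustrative only) of how a MessagesPlaceholder slot such as agent_scratchpad is filled in when a ChatPromptTemplate built with from_messages is formatted:

# Minimal sketch: how a MessagesPlaceholder slot is populated at format time.
# The system prompt, question, and scratchpad message below are illustrative,
# not taken from app.py.
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.schema import AIMessage

prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful SQL assistant."),
    ("human", "{input}"),
    MessagesPlaceholder(variable_name="agent_scratchpad"),
])

# An agent framework would inject its intermediate reasoning steps into
# "agent_scratchpad"; here a stand-in message is passed by hand.
messages = prompt.format_messages(
    input="How many rows are in the users table?",
    agent_scratchpad=[AIMessage(content="Thought: I should inspect the schema first.")],
)

for message in messages:
    print(type(message).__name__, "->", message.content)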