import gradio as gr
import sqlite3
from openai import OpenAI
from pydantic import BaseModel
from tenacity import retry, stop_after_attempt, wait_fixed
import os
api_key = os.getenv("OPENAI_API_KEY")
css = """
#codetitle {background-color: rgba(0,0,0,1.0);}
"""
client = OpenAI(api_key=api_key)
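# A fail-fast guard with a clearer error message (an optional sketch, not part
# of the original app) could be added here:
#   if not api_key:
#       raise RuntimeError("OPENAI_API_KEY environment variable is not set")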
sample_questions = [
    "Are we reaching the right patients?",
    "Which channels are most effective in engaging patients?",
    "What is the current ROI of omnichannel campaigns?",
    "How long does it take from symptom to diagnosis?",
    "What is the main reason for patients dropping off?",
]
class Result(BaseModel):
    steps: str
    final_query: str

class Explanation(BaseModel):
    explanation: list[str]
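# The client.beta.chat.completions.parse calls below coerce the model's reply
# into these Pydantic classes. Illustratively (made-up values, not real output):
#   Result(steps="1. Count engaged patients. 2. Divide by total patients.",
#          final_query="SELECT COUNT(*) FROM patients;")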
with open("database_schema.jsonc") as f:
    schema = f.read()
def execute_sql_directly(conn, sql_query, max_rows=50):
    """
    Executes an SQL query directly using the sqlite3 module.

    Args:
        conn: The SQLite database connection object.
        sql_query: The SQL query string to execute.
        max_rows: The maximum number of rows to return.

    Returns:
        A tuple:
        - success (bool): True if the query executed successfully, False otherwise.
        - result (list/str): If success is True, a list of dictionaries
          representing the query result.
          If success is False, an error message string.
    """
    cursor = conn.cursor()
    try:
        cursor.execute(sql_query)
        rows = cursor.fetchall()
        # Get column names from the cursor description
        columns = [description[0] for description in cursor.description]
        # Convert rows to a list of dictionaries, limited to max_rows
        result = [dict(zip(columns, row)) for row in rows[:max_rows]]
        return True, result
    except sqlite3.Error as e:
        return False, str(e)
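# A minimal usage sketch (assumes a local test.db; the table name below is
# illustrative, not taken from the real schema):
#   conn = sqlite3.connect("test.db")
#   ok, rows = execute_sql_directly(conn, "SELECT COUNT(*) AS n FROM patients")
#   print(rows if ok else f"SQL error: {rows}")   # e.g. [{'n': 123}]
#   conn.close()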
@retry(wait=wait_fixed(1), stop=stop_after_attempt(3))
def call_llm(query):
    """Call the LLM to generate a SQL query, execute it, and explain the
    result; tenacity retries the whole pipeline if any step fails."""
    messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": "text",
                    "text": f"schema: {schema} \n\n You are an expert in text-to-SQL. Considering the schema above, think step by step and return a SQL query that could be executed via sqlite3. Try to keep it simple; use a simple ratio or average wherever possible based on the schema. \n\n query: {query} ",
                }
            ],
        }
    ]
    response = client.beta.chat.completions.parse(
        model="gpt-4o-mini",
        messages=messages,
        response_format=Result,
        max_tokens=300,
        temperature=0.01,
    )
    res = response.choices[0].message.parsed
    llm_generated_queries = [res.final_query]

    explanation = None  # stays None if the query fails or returns no rows
    final = ""
    # Connect to the SQLite database
    conn = sqlite3.connect('test.db')
    for i, sql_query in enumerate(llm_generated_queries):
        print(f"\n[Query {i+1}]")
        print(f"Executing SQL: {sql_query}")
        success, result = execute_sql_directly(conn, sql_query)
        if success:
            print("Query executed successfully.")
            print(f"Result (limited to {len(result)} rows):")
            if result:
                # Print the header
                header = list(result[0].keys())
                print(header)
                final = final + str(header) + "\n"
                for row in result:
                    print(list(row.values()))
                    final = final + str(list(row.values())) + "\n"
                messages = [
                    {
                        "role": "user",
                        "content": [
                            {
                                "type": "text",
                                "text": f"schema: {schema} \n\n You are explaining the query and, more importantly, the results to a non-technical business person, step by step in 2-4 lines. Considering the schema above, think step by step and return an explanation of the result and the query.\n\n query: {query} \n\n result : {result} ",
                            }
                        ],
                    }
                ]
                response = client.beta.chat.completions.parse(
                    model="gpt-4o-mini",
                    messages=messages,
                    response_format=Explanation,
                    max_tokens=250,
                    temperature=0.1,
                )
                res = response.choices[0].message.parsed
                print("Explanation:")
                print(res.explanation)
                explanation = res.explanation
            else:
                print("[]")  # Empty result
        else:
            print(f"Query failed: {result}")
    conn.close()
    if explanation is None:
        # No explanation produced: raise so tenacity retries and, after the
        # final attempt, the UI falls back to its error message.
        raise RuntimeError("Query failed or returned no rows")
    return explanation, llm_generated_queries[0] + "\n" + final
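# A minimal usage sketch (assumes OPENAI_API_KEY is set and test.db exists;
# the question is illustrative):
#   explanation, code = call_llm("Which channels are most effective?")
#   # `explanation` is the Explanation model's list of short strings;
#   # `code` is the generated SQL followed by the stringified result rows.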
def get_explanation_and_code(query):
    try:
        explanation, result = call_llm(query)
        text = ""
        for line in explanation:
            text += "* " + line + "\n"
        return text, result
    except Exception as e:
        print(f"Error: {e}")
        return (
            "This question may not be possible with the current integrated data. Please raise a request with your Incostra partner to enable such questions in the future.",
            "Something went wrong, please try again!",
        )
# --- Gradio App Definition ---
with gr.Blocks(theme=gr.themes.Soft(primary_hue=gr.themes.colors.blue, secondary_hue=gr.themes.colors.sky), css=css) as demo:
    with gr.Row():
        gr.Image("logo.png", width=100, height=100, scale=0, show_download_button=False, container=False, show_fullscreen_button=False, show_share_button=False)
        with gr.Column():
            gr.Markdown("# Commercial and Patient Intelligence")
            gr.Markdown("Your entire business, in one chat: get instant, real-time answers from all your omnichannel, patient, and other data, with zero technical effort or training needed.")

    # State to manage the visibility of the code area. True if visible, False if hidden.
    initial_code_area_visibility = gr.State(False)
    # --- Event Handlers ---

    # Function to handle chat responses
    def handle_chat_submission(message, chat_history):
        """
        Called when the user sends a message.
        Gets the explanation and code, then updates the chat history and code display.
        """
        explanation, code_snippet = get_explanation_and_code(message)
        chat_history.append({"role": "user", "content": message})
        chat_history.append({"role": "assistant", "content": explanation})
        if message == sample_questions[1]:
            chat_history.append({"role": "assistant", "content": gr.Image("test.png", show_label=False)})
        # Returns: updated chat history, new code snippet
        return chat_history, code_snippet
    # Function to toggle the visibility of the code display area
    def toggle_code_area_visibility(current_visibility_state):
        """
        Toggles the visibility of the code display Group.
        Updates the button text accordingly.
        """
        new_visibility = not current_visibility_state
        button_text = "▲ Hide Gen-AI created code" if new_visibility else "▼ Show Gen-AI created code"
        # Returns: new visibility state, update for Group visibility, update for button text
        return new_visibility, gr.update(visible=new_visibility), gr.update(value=button_text)
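    # For example, if the area is currently hidden (state False), a click returns:
    #   (True, gr.update(visible=True), gr.update(value="▲ Hide Gen-AI created code"))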
    with gr.Row(equal_height=False):
        # Column 1: Chat Interface
        with gr.Column(scale=3):  # Takes 3/5 of the width
            chatbot_display = gr.Chatbot(
                type="messages",
                label="Explanation Chat",
                autoscroll=True,
                height=500,  # Set a fixed height for the chat window
            )
            with gr.Row():
                user_message_input = gr.Textbox(
                    label="What insights are you seeking?",
                    placeholder="Type your question here...",
                    lines=1,
                    scale=4,  # Textbox takes more horizontal space in this row
                )
            with gr.Row():
                empty_space = gr.HTML("")
                send_button = gr.Button("Ask", variant="primary", scale=1)  # Send button
                empty_space = gr.HTML("")
            gr.Markdown("### Sample Omnichannel queries")
            with gr.Row():
                q1 = gr.Button(sample_questions[0], variant="primary")
            with gr.Row():
                q2 = gr.Button(sample_questions[1], variant="primary")
            with gr.Row():
                q3 = gr.Button(sample_questions[2], variant="primary")
            gr.Markdown("### Sample Patient treatment queries")
            with gr.Row():
                q4 = gr.Button(sample_questions[3], variant="primary")
            with gr.Row():
                q5 = gr.Button(sample_questions[4], variant="primary")
            with gr.Row():
                empty_space = gr.HTML("")
                view_report = gr.DownloadButton(label="View Report", variant="primary", value="report.pdf", scale=1)  # View report button
                empty_space = gr.HTML("")
            with gr.Row():
                contact = gr.Markdown("Contact me at akshat@incostra.com to discuss any doubts or ideas you have about creating a data-driven organisation and culture.")
        # Column 2: Code Sidebox (Toggleable)
        with gr.Column(scale=2):  # Takes 2/5 of the width
            with gr.Row():
                empty_space = gr.HTML("")
                gr.Image("logo.png", width=100, height=100, show_download_button=False, container=False, show_fullscreen_button=False, show_share_button=False)
                empty_space = gr.HTML("")
            toggle_code_button = gr.Button("▼ Show Gen-AI created code", variant="secondary")

            # This Group contains the code display and is toggled
            # (gr.Group is used here instead of the removed gr.Box)
            code_display_area = gr.Group(visible=False)  # Initially hidden
            with code_display_area:
                gr.Markdown("Results extracted from aggregated datasets specially designed for Gen-AI conversational insights", elem_id="codetitle")
                code_output_display = gr.Code(
                    value="# Code will appear here once you ask a question.",
                    language="python",  # Default language; could be made dynamic if needed
                    label="Code Snippet",
                    interactive=False,  # Code is for display, not user editing
                    lines=20,  # Roughly corresponds to height
                )
    # Connect the Send button to the chat handler
    send_button.click(
        fn=handle_chat_submission,
        inputs=[user_message_input, chatbot_display],
        outputs=[chatbot_display, code_output_display],  # Update chatbot and the code display
    ).then(
        fn=lambda: gr.update(value=""),  # Clear the input textbox
        inputs=None,
        outputs=[user_message_input],
    )

    # Connect each sample-question button to the chat handler with its canned question
    q1.click(
        fn=lambda chatbot: handle_chat_submission(sample_questions[0], chatbot),
        inputs=[chatbot_display],
        outputs=[chatbot_display, code_output_display],
    )
    q2.click(
        fn=lambda chatbot: handle_chat_submission(sample_questions[1], chatbot),
        inputs=[chatbot_display],
        outputs=[chatbot_display, code_output_display],
    )
    q3.click(
        fn=lambda chatbot: handle_chat_submission(sample_questions[2], chatbot),
        inputs=[chatbot_display],
        outputs=[chatbot_display, code_output_display],
    )
    q4.click(
        fn=lambda chatbot: handle_chat_submission(sample_questions[3], chatbot),
        inputs=[chatbot_display],
        outputs=[chatbot_display, code_output_display],
    )
    q5.click(
        fn=lambda chatbot: handle_chat_submission(sample_questions[4], chatbot),
        inputs=[chatbot_display],
        outputs=[chatbot_display, code_output_display],
    )

    # Connect the Enter key in the textbox to the chat handler
    user_message_input.submit(
        fn=handle_chat_submission,
        inputs=[user_message_input, chatbot_display],
        outputs=[chatbot_display, code_output_display],  # Update chatbot and the code display
    ).then(
        fn=lambda: gr.update(value=""),  # Clear the input textbox
        inputs=None,
        outputs=[user_message_input],
    )

    # Connect the toggle button to its handler
    toggle_code_button.click(
        fn=toggle_code_area_visibility,
        inputs=[initial_code_area_visibility],  # Pass the current visibility state
        outputs=[
            initial_code_area_visibility,  # Update the state variable
            code_display_area,             # Update the visibility of the Group component
            toggle_code_button,            # Update the button's text
        ],
    )
if __name__ == "__main__":
    demo.launch(
        auth=[
            (os.getenv("username"), os.getenv("password")),
            (os.getenv("username1"), os.getenv("password1")),
        ],
        ssr_mode=False,
    )