# healthdemo / app.py  (Hugging Face Space — MayankLad31, commit 7326838)
# Standard library
import os
import sqlite3

# Third-party
import gradio as gr
from openai import OpenAI
from pydantic import BaseModel
# Explicit imports instead of `from tenacity import *` — only these three
# names are used (see the @retry decorator on call_llm).
from tenacity import retry, stop_after_attempt, wait_fixed

# OpenAI key is injected via the environment; never hard-code secrets.
api_key = os.getenv("OPENAI_API_KEY")

# Custom CSS: opaque black background for the code-panel title (elem_id).
css = """
#codetitle {background-color: rgba(0,0,0,1.0);}
"""

client = OpenAI(api_key=api_key)

# Canned questions surfaced as quick-access buttons in the UI; index 1 also
# triggers a canned chart image in handle_chat_submission.
sample_questions = [
    "Are we reaching right patients?",
    "Which channels are most effective in engaging patients?",
    "What is the current ROI of omnichannel campaigns?",
    "How long does it take from symptom to diagnosis?",
    "Main reason for patients dropping off?",
]
class Result(BaseModel):
    """Structured LLM output for the text-to-SQL step."""

    # Step-by-step reasoning the model produced before the final query.
    steps: str
    # The final SQL string, expected to be executable via sqlite3.
    final_query: str
class Explanation(BaseModel):
    """Structured LLM output for the result-explanation step."""

    # One explanation line per entry; rendered as "* <line>" bullets in chat.
    explanation: list[str]
# Database schema description (JSON-with-comments) embedded in every LLM
# prompt. Read via a context manager so the file handle is closed promptly
# (the original `open(...).read()` leaked the handle until GC).
with open("database_schema.jsonc", encoding="utf-8") as _schema_file:
    schema = _schema_file.read()
def execute_sql_directly(conn, sql_query, max_rows=50):
    """
    Executes an SQL query directly using the sqlite3 module.

    Args:
        conn: The SQLite database connection object.
        sql_query: The SQL query string to execute.
        max_rows: The maximum number of rows to return.

    Returns:
        A tuple:
        - success (bool): True if the query executed successfully, False otherwise.
        - result (list/str): If success is True, a list of dictionaries
          (column name -> value) representing the query result, at most
          ``max_rows`` long; an empty list for statements that produce no
          result set (e.g. INSERT/UPDATE).
          If success is False, an error message string.
    """
    cursor = conn.cursor()
    try:
        cursor.execute(sql_query)
        # cursor.description is None for statements with no result set
        # (INSERT/UPDATE/DDL). The original code crashed here with a
        # TypeError — which `except sqlite3.Error` does not catch.
        if cursor.description is None:
            return True, []
        columns = [description[0] for description in cursor.description]
        # fetchmany() stops at the limit instead of materializing every
        # row with fetchall() and slicing afterwards.
        rows = cursor.fetchmany(max_rows)
        return True, [dict(zip(columns, row)) for row in rows]
    except sqlite3.Error as e:
        return False, str(e)
@retry(wait=wait_fixed(1), stop=stop_after_attempt(3))
def call_llm(query):
    """
    Turn a natural-language question into SQL via the LLM, execute it
    against test.db, and have the LLM explain the result.

    Tenacity retries the whole function (up to 3 attempts, 1 s apart) on
    any exception — including a failed SQL execution, which is now raised
    explicitly below. The original code relied on an accidental NameError
    (unbound `explanation`) to trigger the retry, and left the connection
    open when an exception escaped the loop.

    Args:
        query: Natural-language question from the user.

    Returns:
        (explanation, code_text):
            explanation: list[str] of bullet lines (empty list when the
                query returned no rows).
            code_text: the generated SQL, a newline, and a textual dump
                of the result rows.

    Raises:
        RuntimeError: if the generated SQL fails to execute (retried by
            tenacity; propagates to the caller after the last attempt).
    """
    # First LLM call: text -> SQL (structured output parsed into Result).
    messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": "text",
                    "text": f"schema: {schema} \n\n You are an expert in text to sql. Considering the schema above, think step by step and return sql query that could be executed via sqlite3. Try to keep it simple, use simple ratio or average wherever possible based on schema. \n\n query: {query} ",
                }
            ],
        }
    ]
    response = client.beta.chat.completions.parse(
        model="gpt-4o-mini",
        messages=messages,
        response_format=Result,
        max_tokens=300,
        temperature=0.01,
    )
    res = response.choices[0].message.parsed
    llm_generated_queries = [res.final_query]

    explanation = []  # default when the query yields no rows
    final = ""
    # Connect to the SQLite database; finally-close so the handle is
    # released even if an LLM call or SQL execution raises mid-loop.
    conn = sqlite3.connect('test.db')
    try:
        for i, sql_query in enumerate(llm_generated_queries):
            print(f"\n[Query {i+1}]")
            print(f"Executing SQL: {sql_query}")
            success, result = execute_sql_directly(conn, sql_query)
            final = ""
            if not success:
                print(f"Query failed: {result}")
                # Explicitly raise so tenacity retries with a fresh LLM
                # attempt instead of falling through to a NameError.
                raise RuntimeError(f"Query failed: {result}")
            print("Query executed successfully.")
            print(f"Result (limited to {len(result)} rows):")
            if not result:
                print("[]")  # Empty result
                continue
            # Dump header + rows both to stdout and into the code panel text.
            header = list(result[0].keys())
            print(header)
            final = final + str(header) + "\n"
            for row in result:
                print(list(row.values()))
                final = final + str(list(row.values())) + "\n"
            # Second LLM call: explain query + result for a business user.
            messages = [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "text",
                            "text": f"schema: {schema} \n\n You are explaining the query and and more importantly the results to a non technical business person, step by step in 2-4 lines. Considering the schema above, think step by step and return explanation of the result and the query.\n\n query: {query} \n\n result : {result} ",
                        }
                    ],
                }
            ]
            response = client.beta.chat.completions.parse(
                model="gpt-4o-mini",
                messages=messages,
                response_format=Explanation,
                max_tokens=250,
                temperature=0.1,
            )
            res = response.choices[0].message.parsed
            print("Explanation:")
            print(res.explanation)
            explanation = res.explanation
    finally:
        conn.close()
    return explanation, llm_generated_queries[0] + "\n" + final
def get_explanation_and_code(query):
    """
    Answer a user question: return (bullet_text, code_text).

    On success, bullet_text is the LLM explanation formatted as markdown
    bullets and code_text is the generated SQL plus its result dump. Any
    failure (LLM error, bad SQL after retries, etc.) degrades to a pair of
    friendly fallback messages instead of surfacing the exception.
    """
    try:
        bullet_lines, code_text = call_llm(query)
        bullet_text = "".join(f"* {line}\n" for line in bullet_lines)
        return bullet_text, code_text
    except Exception as exc:
        print(f"Error: {exc}")
        return (
            "This question may not be possible with the current integrated data. Please raise a request with your Incostra partner to enable such questions in the future.",
            "Something went wrong, please try again!",
        )
# --- Gradio App Definition ---
# NOTE(review): indentation/nesting reconstructed from a whitespace-mangled
# paste — confirm the layout nesting against the deployed Space.
with gr.Blocks(theme=gr.themes.Soft(primary_hue=gr.themes.colors.blue, secondary_hue=gr.themes.colors.sky),css=css) as demo:
    # Header: logo next to the title and tagline.
    with gr.Row():
        gr.Image("logo.png",width = 100, height=100,scale =0,show_download_button=False,container=False,show_fullscreen_button=False,show_share_button=False)
        with gr.Column():
            gr.Markdown("# Commercial and Patient Intelligence")
            gr.Markdown("Your entire business, in one chat — get instant, real-time answers from all your omnichannel, patient etc. data, with zero technical effort or training needed")

    # State to manage the visibility of the code area. True if visible, False if hidden.
    initial_code_area_visibility = gr.State(False)

    # --- Event Handlers ---

    # Function to handle chat responses
    def handle_chat_submission(message, chat_history):
        """
        Called when the user sends a message.
        Gets explanation and code, updates chat history and code display.
        """
        explanation, code_snippet = get_explanation_and_code(message)
        chat_history.append({"role": "user", "content":message})
        chat_history.append({"role": "assistant", "content": explanation})
        # Demo shortcut: the second sample question also appends a canned
        # chart image (test.png) to the chat.
        if message==sample_questions[1]:
            chat_history.append({"role": "assistant", "content": gr.Image("test.png", show_label=False)})
        # Returns: updated chat history, new code snippet
        return chat_history, code_snippet

    # Function to toggle the visibility of the code display area
    def toggle_code_area_visibility(current_visibility_state):
        """
        Toggles the visibility of the code display Group.
        Updates the button text accordingly.
        """
        new_visibility = not current_visibility_state
        button_text = "➖ Hide Gen-AI created code" if new_visibility else "➕ Show Gen-AI created code"
        # Returns: new visibility state, update for Group visibility, update for button text
        return new_visibility, gr.update(visible=new_visibility), gr.update(value=button_text)

    with gr.Row(equal_height=False):
        # Column 1: Chat Interface
        with gr.Column(scale=3): # Takes 3/5 of the width
            chatbot_display = gr.Chatbot(
                type="messages",
                label="Explanation Chat",
                autoscroll=True,
                height=500 # Set a fixed height for the chat window
            )
            with gr.Row():
                user_message_input = gr.Textbox(
                    label="What insights are you seeking? ",
                    placeholder="Type your question here...",
                    lines=1,
                    scale=4 # Textbox takes more horizontal space in this row
                )
            with gr.Row():
                # Empty HTML cells pad the row so the Ask button sits centered.
                empty_space = gr.HTML("")
                send_button = gr.Button("Ask", variant="primary", scale=1) # Send button
                empty_space = gr.HTML("")
            gr.Markdown("### Sample Omnichannel queries")
            # One quick-access button per sample question (indices 0-2).
            with gr.Row():
                q1 = gr.Button(sample_questions[0],variant="primary")
            with gr.Row():
                q2 = gr.Button(sample_questions[1],variant="primary")
            with gr.Row():
                q3 = gr.Button(sample_questions[2],variant="primary")
            gr.Markdown("### Sample Patient treatment queries")
            # Quick-access buttons for patient-treatment questions (indices 3-4).
            with gr.Row():
                q4 = gr.Button(sample_questions[3],variant="primary")
            with gr.Row():
                q5 = gr.Button(sample_questions[4],variant="primary")
            with gr.Row():
                empty_space = gr.HTML("")
                view_report = gr.DownloadButton(label="View Report", variant="primary",value="report.pdf",scale = 1) # View report button
                empty_space = gr.HTML("")
            with gr.Row():
                contact = gr.Markdown("Contact me on akshat@incostra.com to discuss any doubts or ideas you have in creating a data-driven organisation and culture.")

        # Column 2: Code Sidebox (Toggleable)
        with gr.Column(scale=2): # Takes 2/5 of the width
            with gr.Row():
                empty_space = gr.HTML("")
                gr.Image("logo.png",width = 100, height=100, show_download_button=False,container=False,show_fullscreen_button=False,show_share_button=False)
                empty_space = gr.HTML("")
            toggle_code_button = gr.Button("➕ Show Gen-AI created code", variant="secondary")
            # This Group contains the code display and is toggled
            # Changed gr.Box to gr.Group
            code_display_area = gr.Group(visible=False) # Initially hidden
            with code_display_area:
                gr.Markdown("Results extracted from aggregated datasets specially designed for Gen-AI conversational insights",elem_id="codetitle")
                code_output_display = gr.Code(
                    value="# Code will appear here once you ask a question.",
                    language="python", # Default language, can be dynamic if needed
                    label="Code Snippet",
                    interactive=False, # Code is for display, not editing by user
                    lines=20 # Roughly corresponds to height
                )

    # Connect the Send button to the chat handler; clear the textbox after.
    send_button.click(
        fn=handle_chat_submission,
        inputs=[user_message_input, chatbot_display],
        outputs=[chatbot_display, code_output_display] # Update chatbot and the code display
    ).then(
        fn=lambda: gr.update(value=""), # Clear the input textbox
        inputs=None,
        outputs=[user_message_input]
    )
    # Each sample-question button submits its canned question; the lambda
    # binds the question text while the chat history flows in as input.
    q1.click(
        fn=lambda chatbot: handle_chat_submission(sample_questions[0], chatbot),
        inputs=[chatbot_display],
        outputs=[chatbot_display, code_output_display]
    )
    q2.click(
        fn=lambda chatbot: handle_chat_submission(sample_questions[1], chatbot),
        inputs=[chatbot_display],
        outputs=[chatbot_display, code_output_display]
    )
    q3.click(
        fn=lambda chatbot: handle_chat_submission(sample_questions[2], chatbot),
        inputs=[chatbot_display],
        outputs=[chatbot_display, code_output_display]
    )
    q4.click(
        fn=lambda chatbot: handle_chat_submission(sample_questions[3], chatbot),
        inputs=[chatbot_display],
        outputs=[chatbot_display, code_output_display]
    )
    q5.click(
        fn=lambda chatbot: handle_chat_submission(sample_questions[4], chatbot),
        inputs=[chatbot_display],
        outputs=[chatbot_display, code_output_display]
    )
    # Connect the Enter key in the textbox to the chat handler
    user_message_input.submit(
        fn=handle_chat_submission,
        inputs=[user_message_input, chatbot_display],
        outputs=[chatbot_display, code_output_display] # Update chatbot and the code display
    ).then(
        fn=lambda: gr.update(value=""), # Clear the input textbox
        inputs=None,
        outputs=[user_message_input]
    )
    # Connect the toggle button to its handler
    toggle_code_button.click(
        fn=toggle_code_area_visibility,
        inputs=[initial_code_area_visibility], # Pass the current visibility state
        outputs=[
            initial_code_area_visibility, # Update the state variable
            code_display_area, # Update the visibility of the Group component
            toggle_code_button # Update the button's text
        ]
    )
# Launch with HTTP basic auth; two credential pairs come from env vars.
# NOTE(review): if these env vars are unset the pairs become (None, None) —
# presumably they are always configured in the Space; verify.
if __name__ == "__main__":
    demo.launch(auth=[(os.getenv("username"), os.getenv("password")),(os.getenv("username1"), os.getenv("password1"))],ssr_mode=False)