"""
Defines the Gradio user interface and manages the application's state
and event handling.

This module is responsible for the presentation layer of the application.
It creates the interactive components and orchestrates the analysis workflow
by calling functions from the data_processing module.
"""

import concurrent.futures
import json

import gradio as gr

from data_processing import (
    llm_generate_analysis_plan_with_history,
    execute_quantitative_query,
    execute_qualitative_query,
    llm_synthesize_enriched_report_stream,
    llm_generate_visualization_code,
    execute_viz_code_and_get_path,
    parse_suggestions_from_report,
)


def create_ui(llm_model, solr_client):
    """
    Builds the Gradio UI and wires up all the event handlers.

    Args:
        llm_model: The initialized Google Gemini model client.
        solr_client: The initialized pysolr client.

    Returns:
        The assembled gr.Blocks application, ready to be launched.
    """
|
with gr.Blocks(theme=gr.themes.Soft(), css="footer {display: none !important}") as demo: |
|
state = gr.State() |
|
|
|
with gr.Row(): |
|
with gr.Column(scale=4): |
|
gr.Markdown("# PharmaCircle AI Data Analyst") |
|
with gr.Column(scale=1): |
|
clear_button = gr.Button("π Start New Analysis", variant="primary") |
|
|
|
gr.Markdown("Ask a question to begin your analysis. I will generate an analysis plan, retrieve quantitative and qualitative data, create a visualization, and write an enriched report.") |
|
|
|
with gr.Row(): |
|
with gr.Column(scale=1): |
|
chatbot = gr.Chatbot(label="Analysis Chat Log", height=700, show_copy_button=True) |
|
msg_textbox = gr.Textbox(placeholder="Ask a question, e.g., 'Show me the top 5 companies by total deal value in 2023'", label="Your Question", interactive=True) |
|
|
|
with gr.Column(scale=2): |
|
with gr.Accordion("Generated Analysis Plan", open=False): |
|
plan_display = gr.Markdown("Plan will appear here...", visible=True) |
|
with gr.Accordion("Retrieved Quantitative Data", open=False): |
|
quantitative_data_display = gr.Markdown("Aggregate data will appear here...", visible=False) |
|
with gr.Accordion("Retrieved Qualitative Data (Examples)", open=False): |
|
qualitative_data_display = gr.Markdown("Example data will appear here...", visible=False) |
|
plot_display = gr.Image(label="Visualization", type="filepath", visible=False) |
|
report_display = gr.Markdown("Report will be streamed here...", visible=False) |
|
|
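
        # NOTE: this handler is a generator; Gradio re-renders the bound
        # outputs on every `yield`, which is what produces the step-by-step
        # progress updates below.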
        def process_analysis_flow(user_input, history, state):
            """
            Manages the conversation flow and yields incremental UI updates.

            Every yield is a 7-tuple matching this handler's `outputs` list:
            (chatbot, state, plot_display, report_display, plan_display,
            quantitative_data_display, qualitative_data_display).
            """
            if state is None:
                state = {'query_count': 0, 'last_suggestions': []}
            if history is None:
                history = []

            # Hide any panels left over from a previous question.
            yield (history, state, gr.update(value=None, visible=False), gr.update(value=None, visible=False), gr.update(value=None, visible=False), gr.update(value=None, visible=False), gr.update(value=None, visible=False))

            query_context = user_input.strip()
            if not query_context:
                history.append((user_input, "Please enter a question to analyze."))
                yield (history, state, None, None, None, None, None)
                return

            history.append((user_input, f"Analyzing: '{query_context}'\n\n*Generating analysis plan...*"))
            yield (history, state, None, None, None, None, None)

            analysis_plan = llm_generate_analysis_plan_with_history(llm_model, query_context, history)
            if not analysis_plan:
                history.append((None, "I'm sorry, I couldn't generate a valid analysis plan. Please try rephrasing."))
                yield (history, state, None, None, None, None, None)
                return
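
            # The plan is a plain dict produced by data_processing; the keys
            # read below ('analysis_dimension', 'analysis_measure',
            # 'query_filter') are the ones this UI depends on. A purely
            # hypothetical example of its shape:
            #   {'analysis_dimension': 'company_name',
            #    'analysis_measure': 'total_deal_value',
            #    'query_filter': 'deal_year:2023'}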
            history.append((None, "✅ Analysis plan generated!"))
            plan_summary = f"""
* **Analysis Dimension:** `{analysis_plan.get('analysis_dimension')}`
* **Analysis Measure:** `{analysis_plan.get('analysis_measure')}`
* **Query Filter:** `{analysis_plan.get('query_filter')}`
"""
            history.append((None, plan_summary))
            formatted_plan = f"**Full Analysis Plan:**\n```json\n{json.dumps(analysis_plan, indent=2)}\n```"
            yield (history, state, None, None, gr.update(value=formatted_plan, visible=True), None, None)

            history.append((None, "*Executing queries for aggregates and examples...*"))
            yield (history, state, None, None, gr.update(value=formatted_plan, visible=True), None, None)
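
            # The aggregate (facet) query and the example (document) query are
            # independent Solr calls, so run them on worker threads and overlap
            # their round-trip latency.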
            with concurrent.futures.ThreadPoolExecutor() as executor:
                future_agg = executor.submit(execute_quantitative_query, solr_client, analysis_plan)
                future_ex = executor.submit(execute_qualitative_query, solr_client, analysis_plan)
                aggregate_data = future_agg.result()
                example_data = future_ex.result()

            if not aggregate_data or aggregate_data.get('count', 0) == 0:
                history.append((None, "No data was found for your query. Please try a different question."))
                yield (history, state, None, None, gr.update(value=formatted_plan, visible=True), None, None)
                return

            formatted_agg_data = f"**Quantitative (Aggregate) Data:**\n```json\n{json.dumps(aggregate_data, indent=2)}\n```"
            formatted_qual_data = f"**Qualitative (Example) Data:**\n```json\n{json.dumps(example_data, indent=2)}\n```"
            qual_data_display_update = gr.update(value=formatted_qual_data, visible=True)
            yield (history, state, None, None, gr.update(value=formatted_plan, visible=True), gr.update(value=formatted_agg_data, visible=True), qual_data_display_update)

            history.append((None, "✅ Data retrieved. Generating visualization and final report..."))
            yield (history, state, None, None, gr.update(value=formatted_plan, visible=True), gr.update(value=formatted_agg_data, visible=True), qual_data_display_update)
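
            # Generate the visualization code on a background thread while the
            # report streams. The streaming loop must stay inside the
            # executor's `with` block: leaving the block calls
            # shutdown(wait=True), which would otherwise block until the
            # visualization LLM call finished and delay the first report chunk.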
            with concurrent.futures.ThreadPoolExecutor() as executor:
                viz_future = executor.submit(llm_generate_visualization_code, llm_model, query_context, aggregate_data)

                # Stream against a snapshot of the history so the chat log is
                # not mutated while the report grows in its own panel.
                report_text = ""
                stream_history = history[:]
                for chunk in llm_synthesize_enriched_report_stream(llm_model, query_context, aggregate_data, example_data, analysis_plan):
                    report_text += chunk
                    yield (stream_history, state, None, gr.update(value=report_text, visible=True), gr.update(value=formatted_plan, visible=True), gr.update(value=formatted_agg_data, visible=True), qual_data_display_update)

                history.append((None, report_text))
                viz_code = viz_future.result()

            plot_path = execute_viz_code_and_get_path(viz_code, aggregate_data)
            output_plot = gr.update(value=plot_path, visible=True) if plot_path else gr.update(visible=False)
            if not plot_path:
                history.append((None, "*I was unable to generate a plot for this data.*\n"))

            yield (history, state, output_plot, gr.update(value=report_text, visible=True), gr.update(value=formatted_plan, visible=True), gr.update(value=formatted_agg_data, visible=True), qual_data_display_update)

            state['query_count'] += 1
            state['last_suggestions'] = parse_suggestions_from_report(report_text)
            next_prompt = "Analysis complete. What would you like to explore next?"
            history.append((None, next_prompt))
            yield (history, state, output_plot, gr.update(value=report_text, visible=True), gr.update(value=formatted_plan, visible=True), gr.update(value=formatted_agg_data, visible=True), qual_data_display_update)
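
        # The tuple returned by reset_all must line up one-for-one with the
        # outputs list of `clear_button.click` below: chatbot, state, textbox,
        # then the five result panels.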
        def reset_all():
            """Resets the entire UI for a new analysis session."""
            return (
                [],
                None,
                "",
                gr.update(value=None, visible=False),
                gr.update(value=None, visible=False),
                gr.update(value=None, visible=False),
                gr.update(value=None, visible=False),
                gr.update(value=None, visible=False),
            )
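
        # Event wiring: submitting the textbox runs the analysis generator,
        # then a chained `.then()` clears the input; queue=False lets these
        # lightweight updates bypass the request queue.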
        msg_textbox.submit(
            fn=process_analysis_flow,
            inputs=[msg_textbox, chatbot, state],
            outputs=[chatbot, state, plot_display, report_display, plan_display, quantitative_data_display, qualitative_data_display],
        ).then(
            lambda: gr.update(value=""),
            None,
            [msg_textbox],
            queue=False,
        )

        clear_button.click(
            fn=reset_all,
            inputs=None,
            outputs=[chatbot, state, msg_textbox, plot_display, report_display, plan_display, quantitative_data_display, qualitative_data_display],
            queue=False,
        )

    return demo
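

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original wiring): one way to
# launch this UI standalone. The environment variable, model name, and Solr
# core URL below are placeholder assumptions; the application's real entry
# point is expected to build and pass in these clients.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import os

    import google.generativeai as genai
    import pysolr

    genai.configure(api_key=os.environ["GOOGLE_API_KEY"])  # assumed env var
    llm_model = genai.GenerativeModel("gemini-1.5-pro")  # placeholder model name
    solr_client = pysolr.Solr("http://localhost:8983/solr/deals", timeout=30)  # placeholder URL
    create_ui(llm_model, solr_client).launch()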