import streamlit as st
from styles import apply_styling
from utils import remove_reasoning_and_sources, clean_explanation
from session_state import initialize_session_state, add_message_to_history, get_full_history
from chat_display import display_chat_history, show_typing_indicator, display_legal_disclaimer
from model import (
    orchestrator_chat,
    fetch_medical_evidence,
    extract_and_link_sources,
    parse_doctor_response
)
from report_generator import (
    extract_medical_json,
    build_medical_report,
    generate_and_download_report,
    show_email_form
)
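
# NOTE: the imported helper interfaces below are assumed from how they are used
# in this file, not verified against their modules:
#   orchestrator_chat(history, query, use_rag=..., is_follow_up=...)
#       -> (reply, explanation, follow_up_questions, evidence)
#   add_message_to_history(message: dict)  # persist one chat turn to the DB-backed history
#   get_full_history() -> list[dict]       # all stored turns, oldest first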

# Set page config (the dark theme itself is presumably applied via apply_styling() below)
st.set_page_config(
    page_title="Daease",
    page_icon=None,
    layout="wide",
    initial_sidebar_state="expanded"  # Keep sidebar expanded to show the toggle
)

# Apply custom styling
apply_styling()

# Initialize session state
initialize_session_state()

# Initialize conversation_lock if not present
if 'conversation_lock' not in st.session_state:
    st.session_state.conversation_lock = False
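# The lock is set when a user prompt is submitted and released once the model
# response has been processed, so report generation cannot interleave with an
# in-flight response.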

# Put the toggle in the sidebar
with st.sidebar:
    st.header("Features")
    st.session_state.use_rag = st.toggle(
        "Database Search",
        value=False,
        help="Toggle to enable or disable medical database search"
    )
    
    # Add Generate Report button
    if st.button("Generate Report", use_container_width=True):
        # Only allow report generation if not in conversation
        if not st.session_state.get('processing', False) and not st.session_state.get('conversation_lock', False):
            st.session_state.show_report_form = True
            # Reset report step for a new report
            st.session_state.report_step = 1
            st.rerun()
    
    # # Add End Conversation button
    # if st.button("End Conversation", use_container_width=True):
    #     end_conversation()
    #     st.rerun()
    
    # Show report form when button is clicked - only if not processing conversation
    if (
        st.session_state.get('show_report_form', False)
        and not st.session_state.get('processing', False)
        and not st.session_state.get('conversation_lock', False)
    ):
        st.subheader("Report Information")
        generate_and_download_report()

# Main app area
st.title("What are you Diagnosing Today?")

# Display chat history
display_chat_history()

# Display typing indicator if processing
show_typing_indicator()

# Chat input
if prompt := st.chat_input("Describe your symptoms or ask a medical question..."):
    # Set conversation lock to prioritize conversation over report generation
    st.session_state.conversation_lock = True
    
    # Add user message to history using the database-backed function
    add_message_to_history({"role": "user", "content": prompt})
    
    # Set processing flag to true
    st.session_state.processing = True
    
    # Force refresh to show the typing indicator
    st.rerun()

# Check if we need to process a response (this block runs after the rerun if processing is True)
if st.session_state.processing:
    try:
        # Get the full history from database
        full_history = get_full_history()
        
        if full_history: # Ensure history is not empty
            current_user_prompt_message = full_history[-1] # The last message is the current user's prompt
            
            # The history for the orchestrator is all messages EXCEPT the last one
            history_for_orchestrator = full_history[:-1]
            
            # The query for the orchestrator is the content of the last user message
            current_query = current_user_prompt_message["content"]
            
            reply, explanation, follow_up_questions, evidence = orchestrator_chat(
                history_for_orchestrator,
                current_query,
                use_rag=st.session_state.use_rag,
                is_follow_up=len(history_for_orchestrator) > 0
            )
            
            # Clean up the response and explanation
            cleaned_reply = remove_reasoning_and_sources(reply)
            cleaned_explanation = clean_explanation(explanation) if explanation else ""
            
            # Store follow-up questions directly (it's already formatted as a string by orchestrator_chat)
            formatted_follow_up = follow_up_questions
            
            # Debug: Print the follow-up questions to verify they're being generated
            print(f"Follow-up questions to be stored: {formatted_follow_up}")
            
            # Add assistant response to history
            assistant_message = {
                "role": "assistant", 
                "content": cleaned_reply,
                "explanation": cleaned_explanation,
                "follow_up_questions": formatted_follow_up,
                "evidence": evidence if evidence else []
            }
            
            # Save to database-backed history
            add_message_to_history(assistant_message)
    except Exception as exc:
        # Surface failures in the chat history so they persist through the rerun
        # below instead of disappearing as an unhandled traceback.
        add_message_to_history({
            "role": "assistant",
            "content": f"Sorry, something went wrong while generating a response: {exc}",
            "explanation": "",
            "follow_up_questions": "",
            "evidence": []
        })
    finally:
        # Set processing back to false regardless of success/failure
        st.session_state.processing = False
        # Release conversation lock after response is processed
        st.session_state.conversation_lock = False
        
    # Force refresh to update the UI with the response
    st.rerun()

# Small, unobtrusive legal disclaimer 
display_legal_disclaimer()

# This ensures app.py can be used as a direct entry point for deployment platforms like Hugging Face
if __name__ == "__main__":
    # Add the current directory to the Python path if needed
    import os
    import sys
    current_dir = os.path.dirname(os.path.abspath(__file__))
    if current_dir not in sys.path:
        sys.path.insert(0, current_dir)
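
# Note: executing "python app.py" only performs the sys.path tweak above; to
# start the app, use the standard Streamlit entry point:
#   streamlit run app.py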