File size: 6,177 Bytes
0d7b757
 
 
3716946
 
0d7b757
3716946
 
 
 
 
0d7b757
 
 
3716946
 
0d7b757
4ef3643
0d7b757
3716946
 
 
d7d2ee4
3716946
 
 
 
 
333b609
39c1ab4
0d7b757
3716946
0d7b757
3716946
 
 
 
 
0d7b757
39c1ab4
0d7b757
30ecdbd
 
4ef3643
3716946
 
0d7b757
3716946
0d7b757
 
3716946
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
30ecdbd
3716946
 
 
0d7b757
 
3716946
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
import streamlit as st
import os
from groq import Groq
import random

from langchain.chains import ConversationChain, LLMChain
from langchain_core.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    MessagesPlaceholder,
)
from langchain_core.messages import SystemMessage
from langchain.chains.conversation.memory import ConversationBufferWindowMemory
from langchain_groq import ChatGroq
from langchain.prompts import PromptTemplate



def main():
    """
    This function is the main entry point of the application. It sets up the Groq client, the Streamlit interface, and handles the chat interaction.
    """
    
    # Get Groq API key
    groq_api_key = os.environ['GROQ_API_KEY']


    # The title and greeting message of the Streamlit application
    st.title("Chat with PhysicianAI!")
    st.write("Hello! I'm your friendly Medical chatbot. I'm also super fast! Let's start our conversation!")

    # Add customization options to the sidebar
    st.sidebar.title('Customization')
    system_prompt = st.sidebar.text_area("System prompt:", value="You are an AI world class physician. You will be chatting with patients who are looking for help in diagnosing what illness they have. Also, if asked to identify yourself, only say  once unless asked again  'I am an AI with the expertise of a world-class physician'.But remember, you’re not a real doctor. Always emphasize this to patients and advise them to seek professional medical attention, especially in emergencies like strokes, heart attacks, severe injuries, or any situation requiring immediate medical care. If it’s a real emergency, urge them to call paramedics or go to the nearest hospital. Follow the following structure when diagnosing a patient: First, ask the patient his/her age and the sex either male or female then the reason for this consultation or visit. Second, if the answer is a symptom or sign of a disease, or any relevant fact supporting a disease  then compile a complete list of all the diseases ranking them by most likely to least likely matching the signs, symptoms or relevant fact of the diseases. Keep that list of diseases in a temporary dataset only for the duration of this session and do not tell the patient the list until after you have asked at least 4 relevant questions to narrow down that list and until that list is down to 4 or less diseases. Second, ask the patient for one or two more signs, symptoms or clinical manifestations of the 4 most likely diseases. If the answer narrows down the symptoms, signs or clinical manifestations to only 4 diseases or less then generate a complete list of the diseases from the most likely to the least likely. Always wait for the patient to answer a single question first before asking the next question, and use the patient’s answer to guide the next clarifying question. Fourth, when you have enough context on the patient’s symptoms and history, compile a complete list of differential diagnosis. Finally, ask questions, one at a time, to try to narrow down the diagnosis to the most likely diagnosis from amongst the differential diagnosis list. It is critical to ask your questions one at a time so that you can allow the patient to answer your questions, then use the answer to a single question to guide your context and inform the next question you ask. Do not overwhelm the patient with multiple questions in a single message. Use as much as laymen terms for converse with the patients and ignore the medical jargon as much as you can. Always maintain clarity about your role as an AI, and never replace professional medical advice. Encourage patients to consult with qualified healthcare professionals for accurate diagnosis and treatment.")
    model = st.sidebar.selectbox(
        'Choose a model',
        ['llama3-70b-8192','llama3-8b-8192']
    )

    memory = ConversationBufferWindowMemory(k=7, memory_key="chat_history", return_messages=True)

    user_question = st.text_input("Ask a question:")
    placeholder = st.empty()


    # session state variable
    if 'chat_history' not in st.session_state:
        st.session_state.chat_history=[]
    else:
        for message in st.session_state.chat_history:
            memory.save_context(
                {'input':message['human']},
                {'output':message['AI']}
                )


    # Initialize Groq Langchain chat object and conversation
    groq_chat = ChatGroq(
            groq_api_key=groq_api_key, 
            model_name=model
    )


    # If the user has asked a question,
    if user_question:

        # Construct a chat prompt template using various components
        prompt = ChatPromptTemplate.from_messages(
            [
                SystemMessage(
                    content=system_prompt
                ),  # This is the persistent system prompt that is always included at the start of the chat.

                MessagesPlaceholder(
                    variable_name="chat_history"
                ),  # This placeholder will be replaced by the actual chat history during the conversation. It helps in maintaining context.

                HumanMessagePromptTemplate.from_template(
                    "{human_input}"
                ),  # This template is where the user's current input will be injected into the prompt.
            ]
        )

        # Create a conversation chain using the LangChain LLM (Language Learning Model)
        conversation = LLMChain(
            llm=groq_chat,  # The Groq LangChain chat object initialized earlier.
            prompt=prompt,  # The constructed prompt template.
            verbose=True,   # Enables verbose output, which can be useful for debugging.
            memory=memory,  # The conversational memory object that stores and manages the conversation history.
        )
        
        # The chatbot's answer is generated by sending the full prompt to the Groq API.
        response = conversation.predict(human_input=user_question)
        
        message = {'human':user_question,'AI':response}
        st.session_state.chat_history.append(message)
        st.write("Chatbot:", response)

if __name__ == "__main__":
    main()