"""Streamlit chat application: converse with "PhysicianAI", a Groq-hosted LLM
driven through a LangChain LLMChain with windowed conversation memory."""

import os
import random  # NOTE(review): unused in this file — candidate for removal

import streamlit as st
from groq import Groq  # NOTE(review): unused — ChatGroq below is what talks to Groq
from langchain.chains import ConversationChain, LLMChain  # NOTE(review): ConversationChain unused
from langchain.chains.conversation.memory import ConversationBufferWindowMemory
from langchain.prompts import PromptTemplate  # NOTE(review): unused
from langchain_core.messages import SystemMessage
from langchain_core.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    MessagesPlaceholder,
)
from langchain_groq import ChatGroq

# Default persona/instructions shown (and editable) in the sidebar text area.
# Reproduced byte-for-byte from the original inline literal, including its
# original line breaks and spacing — it is runtime-visible prompt text.
DEFAULT_SYSTEM_PROMPT = (
    "You are an AI world class physician. You will be chatting with patients who are looking for help in "
    "diagnosing what illness they have. Also, if asked to identify yourself, only say once unless asked again "
    "'I am an AI with the expertise of a world-class physician'.But remember, you\u2019re not a real doctor. "
    "Always emphasize this to patients and advise them to seek professional medical attention, especially in "
    "emergencies like strokes, heart attacks, severe injuries, or any situation requiring immediate medical "
    "care. If it\u2019s a real emergency, urge them to call paramedics or go to the nearest hospital. Follow the "
    "following structure when diagnosing a patient: First, ask the patient his/her age and the sex either male "
    "or female then the reason for this consultation or visit. \n"
    "Second, if the answer is a symptom or sign of a disease, or any relevant fact supporting a disease then "
    "compile a complete list of all the diseases ranking them by most likely to least likely matching the "
    "signs, symptoms or relevant fact of the diseases. Keep that list of diseases in a temporary dataset only "
    "for the duration of this session and do not tell the patient the list until after you have asked at least "
    "4 relevant questions to narrow down that list and until that list is down to 4 or less diseases. Second, "
    "ask the patient for one or two more signs, symptoms or clinical manifestations of the 4 most likely "
    "diseases. If the answer narrows down the symptoms, signs or clinical manifestations to only 4 diseases or "
    "less then generate a complete list of the diseases from the most likely to the least likely. Always wait "
    "for the patient to answer a single question first before asking the next question, and use the "
    "patient\u2019s answer to guide the next clarifying question. Fourth, when you have enough context on the "
    "patient\u2019s symptoms and history, compile a complete list of differential diagnosis. Finally, ask "
    "questions, one at a time, to try to narrow down the diagnosis to the most likely diagnosis from amongst "
    "the differential diagnosis list. It is critical to ask your questions one at a time so that you can allow "
    "the patient to answer your questions, then use the answer to a single question to guide your context and "
    "inform the next question you ask. Do not overwhelm the patient with multiple questions in a single "
    "message. Use as much as laymen terms for converse with the patients and ignore the medical jargon as much "
    "as you can. Always maintain clarity about your role as an AI, and never replace professional medical "
    "advice. \n"
    "Encourage patients to consult with qualified healthcare professionals for accurate diagnosis and "
    "treatment."
)


def main():
    """Entry point: build the Streamlit UI, wire a Groq chat model into a
    LangChain conversation chain with windowed memory, and answer the
    user's current question.

    Reads the GROQ_API_KEY environment variable. If it is missing the app
    shows an error and stops instead of crashing with a KeyError (which is
    what the original ``os.environ['GROQ_API_KEY']`` lookup did).
    """
    groq_api_key = os.environ.get("GROQ_API_KEY")
    if not groq_api_key:
        st.error("GROQ_API_KEY environment variable is not set.")
        st.stop()

    # The title and greeting message of the Streamlit application.
    st.title("Chat with PhysicianAI!")
    st.write("Hello! I'm your friendly Medical chatbot. I'm also super fast! Let's start our conversation!")

    # Sidebar customization: editable system prompt and model selection.
    st.sidebar.title('Customization')
    system_prompt = st.sidebar.text_area("System prompt:", value=DEFAULT_SYSTEM_PROMPT)
    model = st.sidebar.selectbox(
        'Choose a model',
        ['llama3-70b-8192', 'llama3-8b-8192']
    )

    # Sliding-window memory: only the last k=7 exchanges are sent as context.
    memory = ConversationBufferWindowMemory(k=7, memory_key="chat_history", return_messages=True)

    user_question = st.text_input("Ask a question:")

    # Streamlit reruns this script on every interaction, so the durable chat
    # history lives in session_state and is replayed into the fresh memory
    # object on each rerun. (Replaying an empty list is a no-op, so no
    # if/else split is needed for the first run.)
    if 'chat_history' not in st.session_state:
        st.session_state.chat_history = []
    for message in st.session_state.chat_history:
        memory.save_context(
            {'input': message['human']},
            {'output': message['AI']}
        )

    # Groq-backed LangChain chat model.
    groq_chat = ChatGroq(
        groq_api_key=groq_api_key,
        model_name=model
    )

    # Only hit the API once the user has actually typed something.
    if user_question:
        # Prompt layout: persistent system message, then the replayed chat
        # history, then the user's current input.
        prompt = ChatPromptTemplate.from_messages(
            [
                SystemMessage(content=system_prompt),
                MessagesPlaceholder(variable_name="chat_history"),
                HumanMessagePromptTemplate.from_template("{human_input}"),
            ]
        )

        # Conversation chain combining the model, prompt, and memory.
        conversation = LLMChain(
            llm=groq_chat,
            prompt=prompt,
            verbose=True,  # log the constructed prompt; useful for debugging
            memory=memory,
        )

        # Send the full prompt to the Groq API, then record the exchange so
        # the next rerun can replay it into memory.
        response = conversation.predict(human_input=user_question)
        st.session_state.chat_history.append({'human': user_question, 'AI': response})
        st.write("Chatbot:", response)


if __name__ == "__main__":
    main()