| | import pandas as pd |
| |
|
| |
|
# Load the three source datasets from the working directory.
df = pd.read_csv('./medical_data.csv')  # medical conditions (first 3 columns used below)
df1=pd.read_csv('./DrugData.csv')  # drug records (first 19 columns used below)
# NOTE(review): df11 is loaded but never referenced again in this file — the
# side-effects dataset is never indexed into the vector store. Confirm intent.
df11=pd.read_csv('./drugs_side_effects_drugs_com.csv')
| |
|
def _frame_to_context(frame, num_cols):
    """Serialize each row's first *num_cols* columns as one "col: value " chunk.

    Args:
        frame: source DataFrame.
        num_cols: how many leading columns to include per row.

    Returns:
        list[str]: one text chunk per row, formatted "col1: v1 col2: v2 ... "
        (trailing space after each field, matching the original loops).

    Cell values are rendered through the f-string (i.e. str()), so numeric or
    NaN cells cannot raise a TypeError — the original df loop concatenated the
    raw cell value, which only worked when every cell was already a string.
    """
    columns = frame.columns[:num_cols]
    texts = []
    for _, row in frame.iterrows():
        # "".join builds each chunk in linear time instead of repeated +=.
        texts.append("".join(f"{col}: {row[col]} " for col in columns))
    return texts


# Flat list of per-row text chunks to embed into the vector store:
# df contributes its first 3 columns, df1 its first 19 (same hard-coded
# limits as the original loops), in the same order.
context_data = _frame_to_context(df, 3) + _frame_to_context(df1, 19)
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| |
|
import os

# Read the Groq API key from the environment (never hard-code secrets).
# Falls back to the conventional GROQ_API_KEY variable — the same variable
# langchain_groq itself resolves when api_key is None — so the original
# behavior is preserved while the common setup also works explicitly.
groq_key = os.environ.get('groq_key') or os.environ.get('GROQ_API_KEY')
| |
|
| | |
from langchain_groq import ChatGroq

# Chat LLM served via the Groq API.
# NOTE(review): "llama-3.1-70b-versatile" has reportedly been decommissioned
# by Groq — confirm a currently supported model id before deploying.
llm = ChatGroq(model="llama-3.1-70b-versatile",api_key=groq_key)
| |
|
| | |
from langchain_huggingface import HuggingFaceEmbeddings

# Sentence-embedding model used for both indexing and querying; downloads
# mixedbread-ai/mxbai-embed-large-v1 from the Hugging Face Hub on first run.
embed_model = HuggingFaceEmbeddings(model_name="mixedbread-ai/mxbai-embed-large-v1")
| |
|
| | |
from langchain_chroma import Chroma

# Chroma vector store persisted on disk.
# NOTE(review): persist_directory="./" writes Chroma's files next to the
# source/data files; a dedicated subdirectory (e.g. "./chroma_db") would be
# tidier — confirm before changing, since existing data may live at "./".
vectorstore = Chroma(
    collection_name="medical_dataset_store",
    embedding_function=embed_model,
    persist_directory="./",
)

# Embed and index every text chunk. NOTE(review): no ids are passed, so
# re-running this script appends duplicate documents to the collection.
vectorstore.add_texts(context_data)

# Default similarity-search retriever over the indexed chunks.
retriever = vectorstore.as_retriever()
| |
|
from langchain_core.prompts import PromptTemplate

# Prompt contract: answer strictly from the retrieved context, admit ignorance
# rather than hallucinate, and never mention the context itself.
template = ("""You are a medical expert.
Use the provided context to answer the question.
If you don't know the answer, say so. Explain your answer in detail.
Do not discuss the context in your response; just provide the answer directly.
Context: {context}
Question: {question}
Answer:""")

# Compile the raw string into a PromptTemplate with input variables
# {context} and {question}.
rag_prompt = PromptTemplate.from_template(template)
| |
|
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough

# Assemble the RAG pipeline with LCEL: fan the incoming question out to the
# retriever (bound as "context") and pass it through unchanged (bound as
# "question"), render the prompt, invoke the LLM, then reduce the reply to
# plain text.
_inputs = {"context": retriever, "question": RunnablePassthrough()}
rag_chain = _inputs | rag_prompt | llm | StrOutputParser()
| |
|
import gradio as gr


def rag_memory_stream(text):
    """Stream the chain's answer, yielding the cumulative text after each token.

    Gradio re-renders the output with every yielded value, so emitting the
    running concatenation produces a live "typing" effect.
    """
    pieces = []
    for token in rag_chain.stream(text):
        pieces.append(token)
        yield "".join(pieces)
| |
|
# Canned questions shown beneath the input box so users can try the bot quickly.
examples = ['I feel dizzy', 'what is the possible sickness for fatigue']

# User-facing app title (typo fixed: "EXPLAINATIONS" -> "EXPLANATIONS").
title = "CARE360BOT FOR MEDICATION EXPLANATIONS AND MEDICAL CONDITIONS"

demo = gr.Interface(
    title=title,
    fn=rag_memory_stream,  # generator fn -> Gradio streams partial answers
    inputs="text",
    outputs="text",
    examples=examples,
    # NOTE(review): allow_flagging is deprecated in Gradio 4.x in favor of
    # flagging_mode="never" — confirm the installed Gradio version.
    allow_flagging="never",
)
| |
|
| |
|
# Launch the Gradio app only when executed as a script (not on import).
if __name__ == "__main__":
    demo.launch()
| |
|