import os

import gradio as gr
import openai
import requests
from dotenv import find_dotenv, load_dotenv
from langchain.agents import AgentExecutor
from langchain.agents.format_scratchpad import format_to_openai_functions
from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnablePassthrough
from langchain.tools import tool
from langchain.tools.render import format_tool_to_openai_function
from pydantic.v1 import BaseModel, Field
_ = load_dotenv(find_dotenv()) # read local .env file
openai.api_key = os.environ['OPENAI_API_KEY']
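# The local .env file is expected to define the API key, e.g.:
#   OPENAI_API_KEY=sk-...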
model_name = 'gpt-4-1106-preview'
model = ChatOpenAI(temperature=0, model=model_name)
output_parser = StrOutputParser()
def invoke_llm(query, context, template):
    """Run a one-shot prompt -> model -> string-output chain over the given template."""
    prompt = ChatPromptTemplate.from_template(template)
    chain = prompt | model | output_parser
    return chain.invoke({"query": query, "context": context})
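# Example call (hypothetical values):
#   invoke_llm(query="fever", context="rows fetched from the API",
#              template="Given {context}, answer: {query}")
# The template must contain both the {query} and {context} placeholders.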
class SearchInput(BaseModel):
    medical_condition: str = Field(..., description="The medical condition to base the search on.")
    address: str = Field(..., description="The address or location to consider for proximity in the search.")
@tool(args_schema=SearchInput)
def search_healthcare_professionals(medical_condition: str, address: str) -> str:
    """
    Search for healthcare professionals' details based on their relevance to a specified
    medical condition and proximity to a given address. Results are approximate and
    based on available data.
    """
BASE_URL = "http://localhost:8000/filter_by_address"
# Parameters for the request
params = {
'address': address,
}
response = requests.get(BASE_URL, params=params)
if response.status_code == 200:
results = response.json()
else:
raise Exception(f"API Request failed with status code: {response.status_code}")
    # Part 2: use the LLM to rank the fetched professionals.
    template = """You are a medical expert and have been provided with a list of healthcare professionals
    along with their details including title, name, specialization, experience,
    company, address, and distance. The data is as follows:
    {context}
    Query: I am seeking healthcare professionals who specialize in {query}. Based
    on the provided data, please rank these professionals according to the relevance
    of their specialization to the medical condition and their distance as mentioned
    in the data. Provide a ranked tabular comparison, including their title, name,
    specialization, experience, company, address, and distance."""
    context = "Results from the database (within 10km):\n{df}".format(df=results)
    return invoke_llm(query=medical_condition, context=context, template=template)
system_message = (
    """You are a helpful and professional assistant for Whitecoat360, a pharmacy service operating in Malaysia.
Your role is to assist users in finding and scheduling appointments with the most suitable pharmacists and nutritionists.
Start by asking users about their specific medical concerns, or whether they have a general health inquiry.
Use this information, along with their location, to call the 'search_healthcare_professionals' function
to identify the right specialists. Once an appropriate specialist is found, guide the user through the appointment-setting process.
Remember, your assistance should focus on logistical support based on user needs and location;
avoid providing medical advice or consultations."""
)
prompt = ChatPromptTemplate.from_messages([
    ("system", system_message),
    MessagesPlaceholder(variable_name="chat_history"),
    ("user", "{input}"),
    MessagesPlaceholder(variable_name="agent_scratchpad"),
])
tools = [search_healthcare_professionals]
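# Advertise each tool to the model in OpenAI function-calling format.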
functions = [format_tool_to_openai_function(f) for f in tools]
chat_model = ChatOpenAI(temperature=0, model=model_name).bind(functions=functions)
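# Agent pipeline: inject prior tool calls and observations as the scratchpad,
# render the chat prompt, call the function-bound model, then parse the result
# into either a tool invocation or a final answer.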
agent_chain = RunnablePassthrough.assign(
    agent_scratchpad=lambda x: format_to_openai_functions(x["intermediate_steps"])
) | prompt | chat_model | OpenAIFunctionsAgentOutputParser()
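# Gradio's ChatInterface passes `history` as (human, ai) message pairs; replay
# them into a fresh ConversationBufferMemory on each call so the agent sees the
# full conversation.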
def predict(message, history):
    gradio_memory = ConversationBufferMemory(return_messages=True, memory_key="chat_history")
    for human, ai in history:
        gradio_memory.save_context({"input": human}, {"output": ai})
    gradio_agent_executor = AgentExecutor(agent=agent_chain, tools=tools, verbose=False, memory=gradio_memory)
    gpt_response = gradio_agent_executor.invoke({"input": message})
    return gpt_response['output']
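# share=True additionally exposes the app through a temporary public Gradio URL.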
gr.ChatInterface(predict).launch(share=True)
# Local smoke test (requires the filter service on localhost:8000):
# print(search_healthcare_professionals.run({"medical_condition": "fever", "address": "phileo"}))