import gradio as gr
import os
import boto3
from llama_index import GPTSimpleVectorIndex
from langchain.agents import ZeroShotAgent, AgentExecutor
from langchain.agents import Tool
from langchain import OpenAI, LLMChain
from cachetools import cached, TTLCache
import openai

# S3 bucket holding the pre-built vector index files
s3 = boto3.resource('s3')
bucket_name = "notesinendocrinology"
combo_index_path = "comboindex.json"
nafld_path = "nafld.json"
osteoporosis_path = "osteoporosis.json"


def keywords(query2):
    """Choose the index file, answer prefix and citation suffix based on keywords in the query."""
    index1 = None
    prefix_answer1 = 'According to NIE:'
    suffix_answer1 = 'Lakhani OJ. EndoAI Answer [Internet]. Notes in Endocrinology. [cited 2023Mar31]. Available from: endocrinology.co.in'

    if 'NASH' in query2 or 'NAFLD' in query2 or 'Non-alcoholic fatty liver disease' in query2:
        index_path = nafld_path
        prefix_answer1 = "Here is the answer based on Notes in Endocrinology and American Association of Clinical Endocrinology Clinical Practice Guideline for the Diagnosis and Management of Nonalcoholic Fatty Liver Disease in Primary Care and Endocrinology Clinical Settings:"
        suffix_answer1 = """Citation: \n
1. Cusi, Kenneth, Scott Isaacs, Diana Barb, Rita Basu, Sonia Caprio, W. Timothy Garvey, Sangeeta Kashyap et al. "American Association of Clinical Endocrinology clinical practice guideline for the diagnosis and management of nonalcoholic fatty liver disease in primary care and endocrinology clinical settings: co-sponsored by the American Association for the Study of Liver Diseases (AASLD)." Endocrine Practice 28, no. 5 (2022): 528-562.
2. Lakhani OJ. EndoAI Answer [Internet]. Notes in Endocrinology. [cited 2023Mar31]. Available from: endocrinology.co.in """
    elif 'osteoporosis' in query2 or 'osteopenia' in query2 or 'low bone mass' in query2 or 'DEXA-BMD' in query2 or 'BMD' in query2 or 'Osteoporosis' in query2:
        index_path = osteoporosis_path
        prefix_answer1 = "According to: Pharmacological Management of Osteoporosis in Postmenopausal Women: An Endocrine Society* Clinical Practice Guideline & Notes in Endocrinology"
        suffix_answer1 = """Citation: \n
1. Eastell R, Rosen CJ, Black DM, Cheung AM, Murad MH, Shoback D. Pharmacological management of osteoporosis in postmenopausal women: an Endocrine Society clinical practice guideline. The Journal of Clinical Endocrinology & Metabolism. 2019 May;104(5):1595-622.
2. Lakhani OJ. EndoAI Answer [Internet]. Notes in Endocrinology. [cited 2023Mar31].
Available from: endocrinology.co.in """
    else:
        index_path = combo_index_path

    if index1 is None:
        s3.Bucket(bucket_name).download_file(index_path, index_path.split("/")[-1])
        print(f"Downloaded {index_path}")
        index1 = GPTSimpleVectorIndex.load_from_disk(index_path)

    return index1, prefix_answer1, suffix_answer1


def send_message(message_log):
    # Use OpenAI's ChatCompletion API to get the chatbot's response
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",  # The name of the OpenAI chatbot model to use
        messages=message_log,   # The conversation history up to this point, as a list of dictionaries
        max_tokens=512,         # The maximum number of tokens (words or subwords) in the generated response
        stop=None,              # The stopping sequence for the generated response, if any (not used here)
        temperature=0.5,        # The "creativity" of the generated response (higher temperature = more creative)
    )

    # Find the first response from the chatbot that has text in it (some responses may not have text)
    for choice in response.choices:
        if "text" in choice:
            return choice.text

    # If no response with text is found, return the first response's content (which may be empty)
    return response.choices[0].message.content


def generate_variations(question):
    """Ask the chat model for one follow-up question and return it together with the original question."""

    def extract(input):
        message_log = [{"role": "system", "content": input}]
        user_input = f"Generate one follow-up question from the following question: {input}. Give one more question only. The question is intended for knowledgeable doctors"
        message_log.append({"role": "user", "content": user_input})
        response = send_message(message_log)
        message_log.append({"role": "assistant", "content": response})
        print(response)
        return response

    input2 = question
    my_string = "0. " + question
    output = extract(input2)
    output_list = output.split("\n")
    final_list = [my_string] + output_list
    print(final_list)
    return final_list


def querying_db(query: str):
    """Query the vector index selected by keywords()."""
    get_index1 = keywords(query)[0]
    response = get_index1.query(query, response_mode="default")
    return response


tools = [
    Tool(
        name="QueryingDB",
        func=querying_db,
        description="useful for when you need to answer questions from the database. The answer is for knowledgeable doctors",
        return_direct=True
    )
]

prefix = "Give an answer to the question"
suffix = """Give answer intended for knowledgeable doctors

Question: {input}
{agent_scratchpad}"""

prompt = ZeroShotAgent.create_prompt(
    tools,
    prefix=prefix,
    suffix=suffix,
    input_variables=["input", "agent_scratchpad"]
)

llm_chain = LLMChain(llm=OpenAI(temperature=0.5), prompt=prompt)
tool_names = [tool.name for tool in tools]
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names)


def get_answer(query_string):
    agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)
    response = agent_executor.run(query_string)
    result = f"{response}"
    return result


def get_answer2(list_thing):
    """Answer every question in the list and join the question/answer pairs."""
    responses = []
    for question in list_thing:
        answer = get_answer(question)
        response = f"{question}\n{answer}"
        responses.append(response)
    return "\n\n".join(responses)


def consolidated_answer(question, oginput):
    """Merge the individual answers into one consolidated answer to the original question."""

    def extract(input):
        message_log = [{"role": "system", "content": input}]
        user_input = f"Give a consolidated answer from this: {input}. It should answer the original question {oginput}. The answer is for knowledgeable doctors so use medical terms."
        message_log.append({"role": "user", "content": user_input})
        response = send_message(message_log)
        message_log.append({"role": "assistant", "content": response})
        print(response)
        return response

    input2 = question
    output = extract(input2)
    print(output)
    return output


def qa_app(query1):
    # 1. Check whether a relevant keyword is found in the query and pick the matching
    #    index, answer prefix and citation suffix.
    # 2. Generate question variations, answer each one and consolidate the answers.
    question_variations_list = generate_variations(query1)
    _, prefix_answer, suffix_answer = keywords(query1)

    # whichindex = keywords(query1)[0]
    # if whichindex == 'nafld.json':
    #     prefix_answer = "Here is the answer based on Notes in Endocrinology and American Association of Clinical Endocrinology Clinical Practice Guideline for the Diagnosis and Management of Nonalcoholic Fatty Liver Disease in Primary Care and Endocrinology Clinical Settings:"
    #     suffix_answer = """Citation: \n
    #     1. Cusi, Kenneth, Scott Isaacs, Diana Barb, Rita Basu, Sonia Caprio, W. Timothy Garvey, Sangeeta Kashyap et al. "American Association of Clinical Endocrinology clinical practice guideline for the diagnosis and management of nonalcoholic fatty liver disease in primary care and endocrinology clinical settings: co-sponsored by the American Association for the Study of Liver Diseases (AASLD)." Endocrine Practice 28, no. 5 (2022): 528-562.
    #     """
    # elif whichindex == 'osteoporosis_new.json':
    #     prefix_answer = "According to: Pharmacological Management of Osteoporosis in Postmenopausal Women: An Endocrine Society* Clinical Practice Guideline & Notes in Endocrinology"
    #     suffix_answer = """Citation: \n
    #     1. Eastell R, Rosen CJ, Black DM, Cheung AM, Murad MH, Shoback D. Pharmacological management of osteoporosis in postmenopausal women: an Endocrine Society clinical practice guideline. The Journal of Clinical Endocrinology & Metabolism. 2019 May;104(5):1595-622.
    #     """
    # else:
    #     prefix_answer = "According to NIE:"
    #     suffix_answer = "Citation: NIE"

    big_answer = get_answer2(question_variations_list)
    final_answer = consolidated_answer(big_answer, query1)
    final_answer_with_citation = prefix_answer + "\n\n" + final_answer + "\n\n" + suffix_answer
    return final_answer_with_citation


inputs = [
    gr.inputs.Textbox(label="Enter your question:"),
]
output = gr.outputs.Textbox(label="Answer:")

iface = gr.Interface(
    fn=qa_app,
    inputs=inputs,
    outputs=output,
    title="Endo AI : Endocrine answering app by Dr. Om J Lakhani"
)

iface.launch()