import os

import gradio as gr
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.prompts import PromptTemplate
from langchain.vectorstores import Chroma

# Get an OpenAI API key from platform.openai.com and export it as OPENAI_API_KEY.
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')

model_name = "text-embedding-ada-002"

OAIembeddings = OpenAIEmbeddings(
    model=model_name,
    openai_api_key=OPENAI_API_KEY,
    disallowed_special=()
)

# Load the persisted Chroma vector store built with the same embedding model.
load_vector_store = Chroma(
    persist_directory="iupui_openai_store_final",
    embedding_function=OAIembeddings
)

prompt_template = """Use the following pieces of information to answer the user's question.
If you don't know the answer, just say that you don't know; don't try to make up an answer.

Context: {context}
Question: {question}

Only return the helpful answer below and nothing else.
Helpful answer:
"""

prompt = PromptTemplate(template=prompt_template, input_variables=['context', 'question'])

# Retrieve the five most similar chunks for each query.
retriever = load_vector_store.as_retriever(search_kwargs={"k": 5})

llm = ChatOpenAI(temperature=0.7, model='gpt-3.5-turbo', openai_api_key=OPENAI_API_KEY)

sample_prompts = [
    "What is HCI?",
    "Tell me more about IUPUI buildings",
    "UITS",
    "How is research at the Computer Science department?",
]


def parse_json_result(data):
    """Extract the page content and source URL from each retrieved document."""
    parsed_results = []
    for document in data['source_documents']:
        page_content = document.page_content
        url = document.metadata['url']
        parsed_results.append({'page_content': page_content, 'url': url})
    return parsed_results


def get_response(user_query):
    chain_type_kwargs = {"prompt": prompt}
    qa = RetrievalQA.from_chain_type(
        llm=llm,
        chain_type="stuff",
        retriever=retriever,
        return_source_documents=True,
        chain_type_kwargs=chain_type_kwargs,
        verbose=True
    )
    response = qa(user_query)
    return response['result'], parse_json_result(response)


input_box = gr.Text(
    label="Prompt",
    show_label=False,
    max_lines=1,
    placeholder="Enter your prompt",
    container=False,
)

# Additional output component for the source links of the retrieved documents.
relevant_links_output = gr.Textbox(
    label="Relevant Links",
    placeholder="Links will be displayed here"
)

iface = gr.Interface(
    fn=get_response,
    inputs=input_box,
    outputs=["text", relevant_links_output],
    title="Unibot",
    description="This is your friendly IUPUI Chatbot",
    examples=sample_prompts,
    allow_flagging="never",
    theme='HaleyCH/HaleyCH_Theme'
)

iface.launch()