import os

import gradio as gr
from langchain.chains import RetrievalQAWithSourcesChain
from langchain.prompts import PromptTemplate
from langchain_community.vectorstores import Pinecone as PineconeVec
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from pinecone import Pinecone

# API keys are read from the environment rather than hard-coded.
openai_key = os.environ['openai_key']
pinecone_key = os.environ['pinecone_key']

index_name = 'first-aid'
embedding_model_name = 'text-embedding-ada-002'

embed = OpenAIEmbeddings(
    model=embedding_model_name,
    openai_api_key=openai_key
)

# Connect to the existing Pinecone index holding the first-aid documents.
pc = Pinecone(api_key=pinecone_key)
index = pc.Index(index_name)

# Wrap the index in a LangChain vector store; `text_field` is the metadata
# key under which the original chunk text was stored at ingestion time.
text_field = "text"
vectorstore = PineconeVec(index, embed, text_field)

llm = ChatOpenAI(
    openai_api_key=openai_key,
    model_name='gpt-3.5-turbo',
    temperature=0.0
)

# Prompt: with the "stuff" chain type, RetrievalQAWithSourcesChain fills in
# {summaries} (the retrieved context) and {question}.
PROMPT_TEMPLATE = """
You are UEAid, a first-aid assistant helping a layperson give first aid to another person.
Give clear step-by-step instructions for the {question}, strictly based on this context: {summaries}.
Always end with: "instructions given by UEAid".
"""

PROMPT = PromptTemplate(
    template=PROMPT_TEMPLATE,
    input_variables=["summaries", "question"]
)
chain_type_kwargs = {"prompt": PROMPT}

qa_with_sources = RetrievalQAWithSourcesChain.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=vectorstore.as_retriever(),
    return_source_documents=True,
    chain_type_kwargs=chain_type_kwargs
)


def format_response(response):
    """Append the file name and page number of each retrieved source document."""
    sources_info = ''
    for document in response['source_documents']:
        sources_info += "document: {}, page: {}\n".format(
            document.metadata['source'].rsplit("/", 1)[-1],
            int(document.metadata['page'])
        )
    return response['answer'] + " Sources:\n" + sources_info


def answer_question(query):
    response = qa_with_sources.invoke(query)
    return format_response(response)


iface = gr.Interface(
    fn=answer_question,
    inputs="text",
    outputs="text"
)
iface.launch()
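
# A minimal sketch of exercising the chain directly, without the Gradio UI.
# It assumes the 'first-aid' Pinecone index is already populated; the query
# string below is hypothetical. Kept commented out because `iface.launch()`
# above blocks the process.
#
#   response = qa_with_sources.invoke({"question": "How do I treat a minor burn?"})
#   print(format_response(response))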