"""Gradio Q&A app: answers first-aid questions via RAG over a Pinecone index.

Reads two required environment variables (``openai_key``, ``pinecone_key``)
and exposes a single-textbox Gradio interface backed by a LangChain
``RetrievalQAWithSourcesChain``.
"""
import os

import gradio as gr
from langchain.chains import RetrievalQAWithSourcesChain
from langchain_community.vectorstores import Pinecone as PineconeVec
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from pinecone import Pinecone

# Credentials -- fail fast with a KeyError at startup if either is unset.
openai_key = os.environ['openai_key']
pinecone_key = os.environ['pinecone_key']

index_name = 'first-aid'
embedding_model_name = 'text-embedding-ada-002'

# Embedding model; must match the one used when the index was populated.
embed = OpenAIEmbeddings(
    model=embedding_model_name,
    openai_api_key=openai_key,
)

# Connect to the pre-existing Pinecone index.
pc = Pinecone(api_key=pinecone_key)
index = pc.Index(index_name)

# Metadata field in the index that stores the raw document text.
text_field = "text"
vectorstore = PineconeVec(index, embed, text_field)

llm = ChatOpenAI(
    openai_api_key=openai_key,
    model_name='gpt-3.5-turbo',
    temperature=0.0,  # deterministic answers for a safety-sensitive domain
)

# "stuff" chain type: all retrieved chunks are inserted into one prompt.
qa_with_sources = RetrievalQAWithSourcesChain.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=vectorstore.as_retriever(),
)


def answer_question(query):
    """Answer *query* against the first-aid index and append cited sources.

    Parameters
    ----------
    query : str
        Free-text question entered in the Gradio textbox.

    Returns
    -------
    str
        The model's answer, followed by its source references.
    """
    result = qa_with_sources.invoke(query)
    # Fix: the original concatenated answer and "sources:" with no separator
    # (rendering as "...answersources: ..."); separate them on a blank line.
    # .get() guards against a missing 'sources' key rather than crashing.
    return "{}\n\nsources: {}".format(result['answer'], result.get('sources', ''))


iface = gr.Interface(
    fn=answer_question,
    inputs="text",
    outputs="text",
)
iface.launch(share=True)