"""Medium-article QA app: index a URL's content into Chroma, then answer questions over it."""

# Imports (stdlib, then third-party)
import os

import gradio as gr
from langchain.chains import RetrievalQA
from langchain.document_loaders import WebBaseLoader
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.indexes import VectorstoreIndexCreator
from langchain.vectorstores import Chroma
from langchain_groq import ChatGroq

# SECURITY: the original code hard-coded a live Groq API key in source.
# That key must be considered compromised and revoked; read it from the
# environment instead (GROQ_API_KEY, with API_KEY as a legacy fallback).
groq_api_key = os.getenv("GROQ_API_KEY") or os.getenv("API_KEY")

# Embedding model used to vectorize the fetched article text.
embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl")

# LLM used to answer questions over the retrieved chunks.
# temperature=0 for deterministic, extractive-style answers.
llm = ChatGroq(
    model_name="Llama3-8b-8192",
    groq_api_key=groq_api_key,
    temperature=0,
)

# Module-level vector index, populated by get_url() and read by get_answer().
index = None


def get_url(my_url):
    """Fetch the article at *my_url*, embed it, and build the global vector index.

    Returns a human-readable status string for the Gradio "Status" textbox
    (the original returned the raw index object, which rendered as an
    unhelpful repr).
    """
    global index
    # Load the page content.
    docs = WebBaseLoader(my_url)
    # Build the Chroma-backed index from the loaded documents.
    index = VectorstoreIndexCreator(
        vectorstore_cls=Chroma,
        embedding=embeddings,
    ).from_loaders([docs])
    return f"Article indexed successfully: {my_url}\nYou can now ask questions in the Question tab."


def get_answer(query):
    """Answer *query* using the previously built index.

    Guards against the index not existing yet (the original raised an
    opaque AttributeError if the user asked before submitting a URL).
    """
    if index is None:
        return "No article indexed yet. Please submit an article URL first."
    answer = index.query(query, llm=llm)
    return answer


# --- Gradio UI: one tab to submit the URL, one tab to ask questions ---
url_interface = gr.Interface(
    fn=get_url,
    inputs=[gr.Textbox(label="Paste the medium article Url")],
    outputs=[gr.Textbox(label="Status", lines=4)],
)

question_interface = gr.Interface(
    fn=get_answer,
    inputs=[gr.Textbox(label="Enter your question")],
    outputs=[gr.Textbox(label="Answer", lines=6)],
)

demo = gr.TabbedInterface(
    interface_list=[url_interface, question_interface],
    tab_names=["Article URL", "Question"],
    title="Medium QA",
)

demo.launch(share=True)