import gradio as gr
from langchain.vectorstores import Chroma
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.llms import HuggingFacePipeline
from transformers import Tool  # Tool base class from the transformers agents API (langchain's Tool has a different interface)
# Initialize the HuggingFaceInstructEmbeddings
hf = HuggingFaceInstructEmbeddings(
    model_name="hkunlp/instructor-large",
    embed_instruction="Represent the document for retrieval: ",
    query_instruction="Represent the query for retrieval: ",
)
# Example texts for the vector store
texts = [
    "The meaning of life is to love",
    "The meaning of vacation is to relax",
    "Roses are red.",
    "Hack the planet!",
]
# Create a Chroma vector store from the example texts
db = Chroma.from_texts(texts, hf, collection_name="my-collection")
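# Optional sanity check on the raw store (assumption: the instructor model
# downloads on first use); returns the single closest document to the query:
# db.similarity_search("What is the meaning of life?", k=1)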
# Load the LLM for answer generation; assumption: any LangChain-compatible
# LLM works here, e.g. Vicuna served through a local transformers pipeline
llm = HuggingFacePipeline.from_model_id(
    model_id="lmsys/vicuna-13b-v1.5",  # swap in whichever model you have available
    task="text-generation",
)
# Create a RetrievalQA chain
docsearcher = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",  # "stuff" concatenates the retrieved docs into a single prompt
    return_source_documents=False,
    retriever=db.as_retriever(search_type="similarity", search_kwargs={"k": 1}),
)
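# The chain can be exercised directly before wiring up the UI, e.g.:
# print(docsearcher.run("What is the meaning of life?"))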
class VectorStoreRetrieverTool(Tool):
    name = "vectorstore_retriever"
    description = "This tool uses LangChain's RetrievalQA to find relevant answers from a vector store based on a given query."
    inputs = ["text"]
    outputs = ["text"]

    def __call__(self, query: str) -> str:
        # Run the query through the RetrievalQA chain
        return docsearcher.run(query)
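# The tool is a plain callable, so it can also be tested without Gradio:
# print(VectorStoreRetrieverTool()("What do roses look like?"))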
# Wrap the tool in a Gradio interface
tool = gr.Interface(
    fn=VectorStoreRetrieverTool(),
    inputs="text",
    outputs="text",
    live=True,
    title="LangChain-Application: Vectorstore-Retriever",
    description="This tool uses LangChain's RetrievalQA to find relevant answers from a vector store based on a given query.",
)
# Launch the Gradio interface
tool.launch()