# rag-tool / vector_store_retriever.py
import gradio as gr
from langchain.vectorstores import Chroma
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.llms import HuggingFacePipeline
from transformers import Tool  # the class-attribute tool pattern below follows the transformers agents API
# Initialize the HuggingFaceInstructEmbeddings
hf = HuggingFaceInstructEmbeddings(
    model_name="hkunlp/instructor-large",
    embed_instruction="Represent the document for retrieval: ",
    query_instruction="Represent the query for retrieval: ",
)
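# Minimal sketch of using the embeddings directly (not part of the original flow):
# each call prepends the matching instruction before encoding.
#   vec = hf.embed_query("What is the meaning of life?")  # -> list of floats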
# Example texts for the vector store
texts = [
    "The meaning of life is to love",
    "The meaning of vacation is to relax",
    "Roses are red.",
    "Hack the planet!",
]
# Create a Chroma vector store from the example texts
db = Chroma.from_texts(texts, hf, collection_name="my-collection")
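# Optional sanity check against the store (hypothetical query, not in the original script):
#   db.similarity_search("Which flowers are red?", k=1)
#   # -> [Document(page_content="Roses are red.")]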
# Load an LLM for the RetrievalQA chain. The original used a "vicuna-13b"
# placeholder call; this is one concrete option via HuggingFacePipeline,
# assuming local access to the weights. Any LangChain-compatible LLM works here.
llm = HuggingFacePipeline.from_model_id(
    model_id="lmsys/vicuna-13b-v1.5",  # assumption: swap in any causal LM you can run
    task="text-generation",
)
# Create a RetrievalQA chain
docsearcher = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",  # "stuff" concatenates the retrieved documents into a single prompt
    return_source_documents=False,
    retriever=db.as_retriever(search_type="similarity", search_kwargs={"k": 1}),
)
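# The chain can also be queried directly, bypassing the Gradio UI
# (hypothetical query, shown only as a sketch):
#   answer = docsearcher.run("What is the meaning of vacation?")
#   print(answer)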
class VectorStoreRetrieverTool(Tool):
    name = "vectorstore_retriever"
    description = (
        "This tool uses LangChain's RetrievalQA to find relevant answers "
        "from a vector store based on a given query."
    )
    inputs = ["text"]
    outputs = ["text"]

    def __call__(self, query: str):
        # Run the query through the RetrievalQA chain and return the answer text
        response = docsearcher.run(query)
        return response
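# Direct invocation sketch (hypothetical query, not part of the original file):
#   retriever_tool = VectorStoreRetrieverTool()
#   print(retriever_tool("What is the meaning of life?"))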
# Expose the tool as a Gradio app
tool = gr.Interface(
    fn=VectorStoreRetrieverTool(),
    inputs="text",
    outputs="text",
    live=True,
    title="LangChain-Application: Vectorstore-Retriever",
    description=(
        "This tool uses LangChain's RetrievalQA to find relevant answers "
        "from a vector store based on a given query."
    ),
)
# Launch the Gradio interface
tool.launch()