| | |
| |
|
| | from langchain.text_splitter import RecursiveCharacterTextSplitter |
| | from langchain.vectorstores import FAISS |
| | from langchain.chains import RetrievalQA |
| | from langchain_community.embeddings import HuggingFaceEmbeddings |
| | from langchain_groq import ChatGroq |
| |
|
def create_vectorstore_from_text(
    text: str,
    chunk_size: int = 500,
    chunk_overlap: int = 50,
):
    """Split *text* into overlapping chunks, embed them, and build a FAISS index.

    Args:
        text: Raw document text to index.
        chunk_size: Maximum characters per chunk (default 500, as before).
        chunk_overlap: Characters shared between adjacent chunks (default 50).

    Returns:
        A FAISS vector store holding one embedding per chunk.

    Raises:
        ValueError: If *text* yields no chunks (e.g. empty or whitespace-only) —
            previously this fell through to an opaque failure inside
            ``FAISS.from_texts``.
    """
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap,
    )
    chunks = splitter.split_text(text)
    if not chunks:
        raise ValueError("`text` produced no chunks; cannot build an empty vector store")

    # CPU-pinned embeddings so the function runs on machines without a GPU.
    embeddings = HuggingFaceEmbeddings(
        model_name="sentence-transformers/all-MiniLM-L6-v2",
        model_kwargs={"device": "cpu"},
    )

    return FAISS.from_texts(chunks, embedding=embeddings)
| |
|
def create_rag_chain(
    vectorstore,
    k: int = 3,
    model_name: str = "llama3-8b-8192",
):
    """Build a RetrievalQA chain over *vectorstore* backed by a Groq-hosted LLM.

    Args:
        vectorstore: A LangChain vector store (e.g. the FAISS index from
            ``create_vectorstore_from_text``) that supports ``as_retriever``.
        k: Number of chunks retrieved per query (default 3, as before).
        model_name: Groq model identifier (default ``llama3-8b-8192``, as before).

    Returns:
        A ``RetrievalQA`` chain wiring the retriever to the LLM.
    """
    retriever = vectorstore.as_retriever(search_kwargs={"k": k})

    # temperature=0 keeps answers deterministic / grounded in the retrieved text.
    llm = ChatGroq(model_name=model_name, temperature=0)

    return RetrievalQA.from_chain_type(llm=llm, retriever=retriever)
| |
|