|
import streamlit as st |
|
from langchain.document_loaders import TextLoader |
|
import os |
|
from langchain.text_splitter import RecursiveCharacterTextSplitter |
|
from langchain.embeddings import HuggingFaceEmbeddings |
|
from langchain.vectorstores import Chroma |
|
from langchain import HuggingFaceHub |
|
from langchain.chains import RetrievalQA |
|
|
|
|
|
|
|
# Hugging Face Hub API token, read from the environment.
# NOTE(review): raises KeyError if the "hub_key" variable is unset — confirm
# that a hard crash at startup is the intended failure mode.
hub_token = os.environ["hub_key"]

# Build the retrieval index: load the raw text file, cut it into small
# overlapping chunks, embed every chunk, and store the vectors in Chroma.
raw_docs = TextLoader("testing.txt").load()

text_splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=20)
chunks = text_splitter.split_documents(raw_docs)

# Default HuggingFaceEmbeddings model; the vector store is in-memory.
embeddings = HuggingFaceEmbeddings()
doc_search = Chroma.from_documents(chunks, embeddings)
|
|
|
# LLM served remotely through the Hugging Face Inference API.
repo_id = "mistralai/Mistral-7B-v0.1"
# NOTE(review): 'min_length': 4000 looks suspicious for a generation kwarg —
# 'max_length' / 'max_new_tokens' is the usual knob; confirm the intent.
llm = HuggingFaceHub(
    repo_id=repo_id,
    huggingfacehub_api_token=hub_token,
    model_kwargs={'temperature': 0.2, 'min_length': 4000},
)

# "stuff" chain type: all retrieved chunks are concatenated into one prompt.
# Fixes: renamed the misspelled 'retireval_chain' (definition and its only
# use below) and dropped the unused 'from langchain.schema import retriever'.
retrieval_chain = RetrievalQA.from_chain_type(
    llm, chain_type="stuff", retriever=doc_search.as_retriever()
)

# Streamlit chat UI: run each user query through the RAG chain and
# render the answer in an assistant chat bubble.
if query := st.chat_input("Enter your query "):
    with st.chat_message("Assistant"):
        st.write(retrieval_chain.run(query))
|
|
|
|
|
|