import streamlit as st
import pickle
import os
import torch
from tqdm.auto import tqdm
from langchain.text_splitter import RecursiveCharacterTextSplitter
# from langchain.vectorstores import Chroma
from langchain.vectorstores import FAISS
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain import HuggingFacePipeline
from langchain.chains import RetrievalQA
st.set_page_config(
    page_title = 'aitGPT',
    page_icon = '✅')

st.markdown("# Hello")
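
# Load the scraped AIT web documents from a local pickle file, split them into
# overlapping chunks, and display the document/chunk counts in the app.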
def load_scraped_web_info():
    with open("/Users/carlosito/Library/CloudStorage/OneDrive-Personal/AIT material/99-AIT-thesis/aitGPT/ait-web-document", "rb") as fp:
        ait_web_documents = pickle.load(fp)

    # Split into 500-character chunks with a 100-character overlap.
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size = 500,
        chunk_overlap = 100,
        length_function = len,
    )
    chunked_text = text_splitter.create_documents([doc for doc in tqdm(ait_web_documents)])

    st.markdown(f"Number of documents: {len(ait_web_documents)}")
    st.markdown(f"Number of chunked texts: {len(chunked_text)}")
def load_embedding_model():
    embedding_model = HuggingFaceInstructEmbeddings(model_name='hkunlp/instructor-base',
                                                    model_kwargs = {'device': torch.device('cuda' if torch.cuda.is_available() else 'cpu')})
    return embedding_model
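
# Load the pre-built FAISS index from the local "faiss_index" directory,
# using the embedding model to embed incoming queries.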
def load_faiss_index(embedding_model):
    vector_database = FAISS.load_local("faiss_index", embedding_model)
    return vector_database
#--------------

load_scraped_web_info()
embedding_model = load_embedding_model()
vector_database = load_faiss_index(embedding_model)
print("load done")
query_input = st.text_input(label = 'your question')

def retrieve_document(query_input):
    related_doc = vector_database.similarity_search(query_input)
    return related_doc

if query_input:
    output = st.text_area(label = "Here are the relevant documents",
                          value = retrieve_document(query_input))