|
import os |
|
import streamlit as st |
|
import pickle |
|
from langchain.llms import OpenAI |
|
from langchain.document_loaders import UnstructuredURLLoader |
|
from langchain.text_splitter import RecursiveCharacterTextSplitter |
|
from langchain.vectorstores import FAISS |
|
from langchain.embeddings import OpenAIEmbeddings, HuggingFaceInstructEmbeddings |
|
from langchain.chains import RetrievalQAWithSourcesChain |
|
from dotenv import load_dotenv |
|
|
|
|
|
def load_data(urls):
    """Fetch and parse the articles at *urls* via UnstructuredURLLoader.

    Returns the loader's list of LangChain Document objects.
    """
    return UnstructuredURLLoader(urls=urls).load()
|
|
|
|
|
def split_data(data):
    """Break loaded documents into overlapping chunks for embedding.

    Splits on paragraph, line, sentence, then clause boundaries, targeting
    ~1000-character chunks with a 100-character overlap between neighbours.
    """
    splitter = RecursiveCharacterTextSplitter(
        separators=['\n\n', '\n', '.', ','],
        chunk_size=1000,
        chunk_overlap=100,
    )
    return splitter.split_documents(data)
|
|
|
|
|
def embed_data(individual_chunks):
    """Embed the document chunks with OpenAI embeddings and index them in FAISS.

    Returns the populated FAISS vector store.
    """
    # NOTE(review): calls the OpenAI embeddings API — requires OPENAI_API_KEY in env.
    return FAISS.from_documents(individual_chunks, OpenAIEmbeddings())
|
|
|
|
|
def save_faiss_index(file_path, vector_data):
    """Persist *vector_data* (the FAISS vector store) to *file_path* as a pickle."""
    with open(file_path, "wb") as out_file:
        pickle.dump(vector_data, out_file)
|
|
|
|
|
def load_faiss_index(file_path):
    """Deserialize and return a previously saved FAISS vector store.

    NOTE(review): pickle.load executes arbitrary code from the file — only safe
    because this app reads back a file it wrote itself; never point it at
    untrusted input.
    """
    with open(file_path, "rb") as stored:
        index = pickle.load(stored)
    return index
|
|
|
|
|
def retrieval_chain(llm, vector_store):
    """Build a sources-aware QA chain over *vector_store* driven by *llm*."""
    retriever = vector_store.as_retriever()
    return RetrievalQAWithSourcesChain.from_llm(llm=llm, retriever=retriever)
|
|
|
|
|
def find_answer(retrieval_chain, question):
    """Run *question* through the retrieval chain and return its raw result dict."""
    payload = {"question": question}
    return retrieval_chain(payload)
|
|
|
def main():
    """Run the ArticleIQ Streamlit app.

    Sidebar collects up to three article URLs; activating the button builds a
    FAISS index on disk, and the question box answers from that index with
    cited sources.
    """
    load_dotenv()  # pulls OPENAI_API_KEY etc. from a local .env file

    st.markdown("## ArticleIQ - Smart News Research Assistant π")

    st.sidebar.title("Articles URLs π")
    urls = [st.sidebar.text_input(f"URL {i+1}") for i in range(3)]

    activate_articleiq = st.sidebar.button("Activate ArticleIQ")
    status_display = st.empty()

    file_path = 'FAISS_Vector_Data.pkl'
    llm = OpenAI(model='gpt-3.5-turbo-instruct', temperature=0.5, max_tokens=500)

    if activate_articleiq:
        # Skip blank URL fields so the loader only fetches real inputs.
        valid_urls = [u for u in urls if u.strip()]
        if not valid_urls:
            status_display.text('Please enter at least one URL')
        else:
            # Show each status BEFORE its long-running step (the original wrote
            # the message after the step finished, so no progress was visible).
            status_display.text('Loading Data β³')
            data = load_data(valid_urls)

            status_display.text('Splitting Data βοΈ')
            individual_chunks = split_data(data)

            status_display.text('Embedding Vectors π₯π€')
            vector_data = embed_data(individual_chunks)

            save_faiss_index(file_path, vector_data)

    # The placeholder doubles as the question box once processing is done.
    question = status_display.text_input('Question: ')
    if question:
        if os.path.exists(file_path):
            vector_store = load_faiss_index(file_path)
            retrieval_chain_obj = retrieval_chain(llm, vector_store)
            final_output = find_answer(retrieval_chain_obj, question)
            st.header("IQ's Answer")
            st.write(final_output["answer"])

            # "sources" is a newline-separated string of URLs, if present.
            sources = final_output.get("sources", '')
            if sources:
                st.subheader("Further reading:")
                for source in sources.split("\n"):
                    st.write(source)


if __name__ == "__main__":
    main()