import os

import streamlit as st
from langchain.chains import create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_together import ChatTogether
from langchain_community.document_loaders import WebBaseLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_core.prompts import ChatPromptTemplate
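# Streamlit page setup and input widget for the target URL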
st.set_page_config(page_title="Chat with Website")
st.title("Chat with Website")

url_input = st.text_input("Enter a website URL:")

docs = []
documents = []
retriever = None
result = None
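# Load the page, split it into chunks, embed the chunks, and index them in FAISS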
if url_input:
    try:
        loader = WebBaseLoader(url_input)
        docs = loader.load()

        text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=50)
        documents = text_splitter.split_documents(docs)

        embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
        vector_store = FAISS.from_documents(documents, embeddings)

        retriever = vector_store.as_retriever()
    except Exception as e:
        st.error(f"Failed to load documents from the specified URL: {str(e)}")
        docs = []
        documents = []
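# Together AI chat model; the API key is read from the TOGETHER_API_KEY environment variable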
llm = ChatTogether(
    together_api_key=os.environ.get("TOGETHER_API_KEY"),
    model="meta-llama/Llama-3-70b-chat-hf"
)

user_query = st.text_area("Ask a question about the website content:", height=200)
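# On submit, answer the question with a retrieval chain over the indexed chunks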
if st.button("Submit"):
    if documents and user_query and retriever:
        prompt_template = """Answer the following question briefly based only on the provided context:
<context>
{context}
</context>
Question: {input}"""

        prompt = ChatPromptTemplate.from_template(prompt_template)

        # Stuff the retrieved chunks into the prompt and generate the answer
        document_chain = create_stuff_documents_chain(llm, prompt)
        retrieval_chain = create_retrieval_chain(retriever, document_chain)

        result = retrieval_chain.invoke({"input": user_query})

        st.write(result["answer"])
    else:
        st.warning("Please enter a valid URL and a question to proceed.")