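"""Web-based question answering over a single webpage.

A Gradio app that loads a URL, indexes its content in a Chroma vector store,
and answers questions with a local llama.cpp model through a LangChain RAG
chain.
"""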
import gradio as gr
from langchain_core.callbacks.manager import CallbackManager
from langchain_core.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.prompts import PromptTemplate
from langchain_community.llms import LlamaCpp
from langchain_core.runnables import RunnablePassthrough
from langchain_core.output_parsers import StrOutputParser
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import WebBaseLoader
from langchain_huggingface.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma
from langchain import hub

# Set up callback manager and model parameters
callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])  # stream tokens to stdout
n_gpu_layers = 0  # model layers to offload to the GPU (0 = run fully on CPU)
n_batch = 512  # tokens processed per batch during prompt evaluation

llm = LlamaCpp(
    model_path="./models/phi-2.Q2_K.gguf",  # local GGUF model file
    n_gpu_layers=n_gpu_layers,
    n_batch=n_batch,
    n_ctx=4096,  # model context window in tokens
    temperature=0.7,
    max_tokens=4096,
    top_p=1,
    callback_manager=callback_manager,
    verbose=False,
)
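# Note: in llama.cpp the prompt and the completion share the n_ctx window, so
# a long retrieved context leaves correspondingly fewer tokens for the answer.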

# Pull a standard RAG prompt from the LangChain Hub (requires network access)
prompt = hub.pull("rlm/rag-prompt")
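# The "rlm/rag-prompt" template expects two inputs, `context` and `question`,
# which the chain in get_answer() supplies.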

# Join the retrieved documents into a single context string
def format_docs(docs):
    return "\n\n".join(doc.page_content for doc in docs)

# Answer `question` from the content of the page at `url`, streaming the result
def get_answer(question, url):
    # Load the page content from the provided URL
    loader = WebBaseLoader(url)
    data = loader.load()

    # Split the page into small chunks for retrieval
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=0)
    all_splits = text_splitter.split_documents(data)

    # Embed the chunks into an in-memory Chroma vector store; the index is
    # rebuilt on every request, which keeps the code simple at the cost of
    # re-embedding the page each time
    vectorstore = Chroma.from_documents(documents=all_splits, embedding=HuggingFaceEmbeddings())
    retriever = vectorstore.as_retriever(search_type="similarity", search_kwargs={"k": 3})

    # Compose the RAG chain: retrieve context, fill the prompt, generate, parse
    rag_chain = (
        {"context": retriever | format_docs, "question": RunnablePassthrough()}
        | prompt
        | llm
        | StrOutputParser()
    )

    # Yield the growing answer so the UI can update incrementally
    answer = ""
    for chunk in rag_chain.stream(question):
        answer += chunk
        yield answer

    # Guarantee at least one yield even if the stream produced no chunks
    yield answer
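# Example of calling the generator directly, outside Gradio (the URL here is
# purely illustrative):
#   for partial in get_answer("What is this page about?", "https://example.com"):
#       print(partial)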

# Create the Gradio interface
iface = gr.Interface(
    fn=get_answer,
    inputs=[
        gr.Textbox(lines=1, label="Question", placeholder="Enter your question here..."),
        gr.Textbox(lines=1, label="URL", placeholder="Enter the website URL here..."),
    ],
    outputs="text",
    title="Web-based Question Answering System",
    description="Ask a question about the content of a webpage and get an answer.",
    examples=[
        ["Which are the top 5 companies in the world with their revenue in table format?",
         "https://www.investopedia.com/biggest-companies-in-the-world-by-market-cap-5212784"]
    ],
)
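# Because get_answer is a generator, Gradio streams each yielded value to the
# text output as it arrives (recent Gradio versions enable the required
# request queueing by default).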

# Launch the app; share=True also creates a temporary public URL
if __name__ == "__main__":
    iface.launch(share=True)