AI-RESEARCHER-2024 committed
Commit 6d6b66e · verified · 1 Parent(s): 9a24266

Create app.py

Files changed (1)
  1. app.py +72 -0
app.py ADDED
@@ -0,0 +1,72 @@
+ import os
+ import gradio as gr
+ from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
+ from langchain_community.embeddings import HuggingFaceEmbeddings
+ from llama_index.llms.ollama import Ollama
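+ # RAG app: bootstrap a local Ollama server, index the PDF files in ./data,
+ # and answer questions about them through a Gradio chat interface.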
+
+ # Set up Ollama: install it, start the server in the background,
+ # give it a moment to come up, then pull the model weights
+ os.system('curl -fsSL https://ollama.com/install.sh | sh')
+ os.system('ollama serve &')
+ os.system('sleep 5')
+ os.system('ollama pull llama3.2')
+
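+ # Note: LlamaIndex can wrap the LangChain embeddings object below via its
+ # langchain integration; this assumes the llama-index-embeddings-langchain
+ # package is installed alongside langchain-community.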
+ # Initialize embeddings and LLM
+ embeddings = HuggingFaceEmbeddings(model_name="BAAI/bge-small-en-v1.5")
+ llama = Ollama(
+     model="llama3.2",
+     request_timeout=1000,
+ )
+
+ def initialize_index():
+     """Initialize the vector store index from PDF files in the data directory"""
+     # Load documents from the data directory
+     loader = SimpleDirectoryReader(
+         input_dir="data",
+         required_exts=[".pdf"]
+     )
+     documents = loader.load_data()
+
+     # Create index
+     index = VectorStoreIndex.from_documents(
+         documents,
+         embed_model=embeddings,
+     )
+
+     # Return query engine with Llama
+     return index.as_query_engine(llm=llama)
+
+ # Initialize the query engine at startup
+ query_engine = initialize_index()
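+ # Hypothetical smoke test (not part of the original commit); uncomment to
+ # verify the engine answers before launching the UI:
+ # print(query_engine.query("What does the first PDF cover?"))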
+
+ def process_query(
+     message: str,
+     history: list[tuple[str, str]],
+ ) -> str:
+     """Process a query using the RAG system"""
+     try:
+         # Get response from the query engine (streaming is configured on
+         # as_query_engine, not per call, so this returns the full answer)
+         response = query_engine.query(message)
+         return str(response)
+     except Exception as e:
+         return f"Error processing query: {str(e)}"
+
+ # Create the Gradio interface
+ demo = gr.ChatInterface(
+     process_query,
+     title="PDF Question Answering with RAG + Llama",
+     description="Ask questions about the content of the loaded PDF documents using the Llama model",
+     examples=[
+         ["What is a computer?"],
+     ],
+     cache_examples=False,
+     retry_btn=None,
+     undo_btn="Delete Previous",
+     clear_btn="Clear",
+ )
+
+ if __name__ == "__main__":
+     demo.launch()
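
For reference, a plausible requirements.txt for this Space, inferred from the imports above; the commit does not include one, and the Gradio pin is an assumption (retry_btn, undo_btn, and clear_btn were removed from gr.ChatInterface in Gradio 5):

    gradio<5
    llama-index-core
    llama-index-llms-ollama
    llama-index-embeddings-langchain
    langchain-community
    sentence-transformers

sentence-transformers is listed because langchain_community's HuggingFaceEmbeddings uses it as the embedding backend.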