mitulagr2 committed
Commit 5e8fd8b
1 Parent(s): 84b3afe

Add app file

Files changed (6)
  1. Dockerfile +29 -0
  2. app/__init__.py +0 -0
  3. app/main.py +58 -0
  4. app/rag.py +59 -0
  5. requirements.txt +6 -0
  6. start_service.sh +16 -0
Dockerfile ADDED
@@ -0,0 +1,29 @@
+ # Use the official Python 3.11 base image
+ FROM python:3.11
+
+ # Set the working directory inside the container
+ WORKDIR /code
+
+ # Copy the dependency list first to take advantage of Docker layer caching
+ COPY ./requirements.txt /code/requirements.txt
+
+ # Copy the startup script
+ COPY ./start_service.sh /code/start_service.sh
+
+ # Install the Python dependencies
+ RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
+
+ # Copy the application code
+ COPY ./app /code/app
+
+ # Ollama's default port (left unexposed; the app itself serves on 7860)
+ # EXPOSE 11434
+
+ # Make the startup script executable
+ RUN chmod +x /code/start_service.sh
+
+ # Run the startup script
+ ENTRYPOINT ["/bin/bash", "/code/start_service.sh"]
+
+ # Alternative: run the FastAPI app directly (kept commented out)
+ # CMD ["fastapi", "run", "app/main.py", "--port", "80"]
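A hypothetical way to exercise this image locally (the chatpdf tag is an arbitrary name, not part of the commit): docker build -t chatpdf . to build, then docker run -p 7860:7860 chatpdf to start the service on the same port that start_service.sh uses.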
app/__init__.py ADDED
File without changes
app/main.py ADDED
@@ -0,0 +1,58 @@
+ import os
+ import shutil
+ from tempfile import NamedTemporaryFile
+ from pathlib import Path
+
+ from fastapi import FastAPI, UploadFile
+ from fastapi.middleware import Middleware
+ from fastapi.middleware.cors import CORSMiddleware
+ from .rag import ChatPDF
+
+ # Allow cross-origin requests from any origin
+ middleware = [
+     Middleware(
+         CORSMiddleware,
+         allow_origins=["*"],
+         allow_methods=["*"],
+         allow_headers=["*"]
+     )
+ ]
+
+ app = FastAPI(middleware=middleware)
+
+ session_assistant = ChatPDF()
+ session_messages = []
+
+ @app.get("/query")
+ def process_input(text: str):
+     if text and text.strip():
+         text = text.strip()
+         agent_text = session_assistant.ask(text)
+         session_messages.append((text, True))
+         session_messages.append((agent_text, False))
+         return agent_text
+
+
+ @app.post("/upload")
+ def upload(files: list[UploadFile]):
+     session_assistant.clear()
+     session_messages.clear()  # reset in place; rebinding would only shadow the global list
+
+     for file in files:
+         try:
+             # Stage the upload in a temporary file so the PDF loader can read it from disk
+             suffix = Path(file.filename).suffix
+             with NamedTemporaryFile(delete=False, suffix=suffix) as tmp:
+                 shutil.copyfileobj(file.file, tmp)
+                 tmp_path = Path(tmp.name)
+             session_assistant.ingest(tmp_path)
+             os.remove(tmp_path)
+         finally:
+             file.file.close()
+
+     return "Files inserted!"
+
+
+ @app.get("/")
+ def ping():
+     return "Pong!"
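For reference, a minimal client-side sketch of exercising the two endpoints above once the service is running. Everything here is an assumption for illustration: the base URL, the sample.pdf path, and the third-party requests package are not part of the commit.

# smoke_test.py -- hypothetical client for the /upload and /query endpoints
import requests

BASE_URL = "http://localhost:7860"  # assumed host/port; 7860 matches start_service.sh

# Upload a PDF; the multipart field name must be "files" to match list[UploadFile]
with open("sample.pdf", "rb") as f:
    r = requests.post(f"{BASE_URL}/upload",
                      files=[("files", ("sample.pdf", f, "application/pdf"))])
print(r.status_code, r.json())  # expected: 200 "Files inserted!"

# Ask a question about the ingested document
r = requests.get(f"{BASE_URL}/query", params={"text": "What is this document about?"})
print(r.json())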
app/rag.py ADDED
@@ -0,0 +1,59 @@
+ from langchain_community.vectorstores import Chroma
+ from langchain_community.chat_models import ChatOllama
+ from langchain_community.embeddings import FastEmbedEmbeddings
+ from langchain.schema.output_parser import StrOutputParser
+ from langchain_community.document_loaders import PyMuPDFLoader
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ from langchain.schema.runnable import RunnablePassthrough
+ from langchain.prompts import PromptTemplate
+ from langchain_community.vectorstores.utils import filter_complex_metadata
+
+
+ class ChatPDF:
+     vector_store = None
+     retriever = None
+     chain = None
+
+     def __init__(self):
+         self.model = ChatOllama(model="phi3:mini")
+         self.text_splitter = RecursiveCharacterTextSplitter(chunk_size=1024, chunk_overlap=100)
+         self.prompt = PromptTemplate.from_template(
+             """
+             <s> [INST] You are an assistant for question-answering tasks. Use the following pieces of retrieved context
+             to answer the question. If you don't know the answer, just say that you don't know. Use three sentences
+             maximum and keep the answer concise. [/INST] </s>
+             [INST] Question: {question}
+             Context: {context}
+             Answer: [/INST]
+             """
+         )
+
+     def ingest(self, pdf_file_path: str):
+         docs = PyMuPDFLoader(file_path=pdf_file_path).load()
+         chunks = self.text_splitter.split_documents(docs)
+         chunks = filter_complex_metadata(chunks)
+
+         self.vector_store = Chroma.from_documents(documents=chunks, embedding=FastEmbedEmbeddings())
+         self.retriever = self.vector_store.as_retriever(
+             search_type="similarity_score_threshold",
+             search_kwargs={
+                 "k": 3,
+                 "score_threshold": 0.5,
+             },
+         )
+
+         self.chain = ({"context": self.retriever, "question": RunnablePassthrough()}
+                       | self.prompt
+                       | self.model
+                       | StrOutputParser())
+
+     def ask(self, query: str):
+         if not self.chain:
+             return "Please add a PDF document first."
+
+         return self.chain.invoke(query)
+
+     def clear(self):
+         self.vector_store = None
+         self.retriever = None
+         self.chain = None
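As a usage note, ChatPDF can also be driven directly, outside FastAPI. A minimal sketch, assuming an Ollama server is already running locally with phi3:mini pulled, and my_doc.pdf is a placeholder path:

# Hypothetical direct use of ChatPDF (requires a running Ollama instance)
from app.rag import ChatPDF

assistant = ChatPDF()
assistant.ingest("my_doc.pdf")                   # builds the vector store, retriever, and chain
print(assistant.ask("Summarize the document."))  # retrieval-augmented answer from phi3:mini
assistant.clear()                                # drops the retriever/chain state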
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ fastapi
+ pymupdf
+ langchain
+ langchain-community
+ fastembed
+ chromadb
start_service.sh ADDED
@@ -0,0 +1,16 @@
+ #!/bin/sh
+
+ # Install Ollama
+ curl -fsSL https://ollama.com/install.sh | sh
+
+ # Start Ollama in the background
+ ollama serve &
+
+ # Wait for Ollama to start
+ sleep 5
+
+ # Pull the phi3:mini model
+ ollama pull phi3:mini
+
+ # Start the FastAPI app on port 7860
+ fastapi run /code/app/main.py --port 7860
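The fixed sleep 5 assumes Ollama becomes ready within five seconds, which may not hold on a cold container. A more robust alternative would poll the server instead; a hypothetical sketch in Python, using only the standard library and Ollama's default local endpoint:

# wait_for_ollama.py -- hypothetical poll-until-ready helper
import time
import urllib.request

def wait_for_ollama(url: str = "http://127.0.0.1:11434", timeout: float = 60.0) -> None:
    # Block until the Ollama server responds, or raise after `timeout` seconds.
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            urllib.request.urlopen(url, timeout=2)  # the root path answers once the server is up
            return
        except OSError:
            time.sleep(1)
    raise TimeoutError("Ollama did not become ready in time")

if __name__ == "__main__":
    wait_for_ollama()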