hanzla committed on
Commit 10e5e54
1 Parent(s): c7721d9

testing with llama2

Files changed (3)
  1. .idea/misc.xml +3 -0
  2. src/interface.py +8 -7
  3. src/pdfchatbot.py +5 -1
.idea/misc.xml CHANGED
@@ -1,4 +1,7 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <project version="4">
+  <component name="Black">
+    <option name="sdkName" value="Python 3.12 (LLama3Rag)" />
+  </component>
   <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.12 (LLama3Rag)" project-jdk-type="Python SDK" />
 </project>
src/interface.py CHANGED
@@ -2,29 +2,30 @@ import gradio as gr
 
 # Gradio application setup
 def create_demo():
-    with gr.Blocks(title= "RAG Chatbot Q&A",
-                   theme = "Soft"
-                   ) as demo:
+    with gr.Blocks(title="RAG Chatbot Q&A", theme="Soft") as demo:
         with gr.Column():
             with gr.Row():
                 chat_history = gr.Chatbot(value=[], elem_id='chatbot', height=680)
                 show_img = gr.Image(label='Overview', height=680)
 
+            with gr.Row():
+                # Add sliders here
+                with gr.Column(scale=0.2):  # Adjust scale as needed
+                    slider1 = gr.Slider(minimum=0, maximum=100, value=50, label="Chunk Size")
+                    slider2 = gr.Slider(minimum=-10, maximum=10, value=0, label="Chunk Overlap")
+
             with gr.Row():
                 with gr.Column(scale=0.60):
                     text_input = gr.Textbox(
                         show_label=False,
                         placeholder="Type here to ask your PDF",
                         container=False)
-
                 with gr.Column(scale=0.20):
                     submit_button = gr.Button('Send')
-
                 with gr.Column(scale=0.20):
                     uploaded_pdf = gr.UploadButton("📁 Upload PDF", file_types=[".pdf"])
-
 
-    return demo, chat_history, show_img, text_input, submit_button, uploaded_pdf
+    return demo, chat_history, show_img, text_input, submit_button, uploaded_pdf, slider1, slider2
 
 if __name__ == '__main__':
     demo, chatbot, show_img, text_input, submit_button, uploaded_pdf = create_demo()
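The new sliders are created and returned by create_demo(), but the __main__ block above still unpacks only the original six values, so nothing reads them yet. Below is a minimal, hypothetical sketch (not part of this commit) of how the extra return values could be consumed as a drop-in for that __main__ block; the answer() handler and its echo output are placeholders, while gr.Blocks re-entry and Button.click are standard Gradio APIs.

# Hypothetical wiring sketch (not part of this commit): unpack the two new
# sliders and read their values when the user presses Send.
demo, chat_history, show_img, text_input, submit_button, uploaded_pdf, slider1, slider2 = create_demo()

def answer(question, chunk_size, chunk_overlap):
    # Placeholder handler: echo the question and the chosen chunking settings
    # back into the chat history as a single (user, bot) turn.
    return [[question, f"chunk_size={int(chunk_size)}, chunk_overlap={int(chunk_overlap)}"]]

with demo:  # re-enter the Blocks context so the event can be registered
    submit_button.click(
        fn=answer,
        inputs=[text_input, slider1, slider2],  # slider values arrive as numbers
        outputs=[chat_history],
    )

demo.launch()

Keeping the sliders in the returned tuple lets the caller decide how the chunking parameters reach PDFChatBot instead of hard-coding them in the interface.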
src/pdfchatbot.py CHANGED
@@ -11,6 +11,8 @@ from langchain.document_loaders import PyPDFLoader
 from langchain.prompts import PromptTemplate
 from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 import spaces
+from langchain_text_splitters import CharacterTextSplitter
+
 
 
 class PDFChatBot:
@@ -90,7 +92,9 @@ class PDFChatBot:
         """
         Load the vector database from the documents and embeddings.
         """
-        self.vectordb = Chroma.from_documents(self.documents, self.embeddings)
+        text_splitter = CharacterTextSplitter(chunk_size=256, chunk_overlap=0)
+        docs = text_splitter.split_documents(self.documents)
+        self.vectordb = Chroma.from_documents(docs, self.embeddings)
 
     def load_tokenizer(self):
         """