hanzla committed
Commit ab45b68
Parent(s): b6e4be6

testing with llama3

Files changed (3):
  1. src/app.py +2 -2
  2. src/interface.py +1 -1
  3. src/pdfchatbot.py +2 -4
src/app.py CHANGED
@@ -2,7 +2,7 @@ from interface import create_demo
 from pdfchatbot import PDFChatBot
 
 # Create Gradio interface
-demo, chat_history, show_img, txt, submit_button, uploaded_pdf, slider1 = create_demo()
+demo, chat_history, show_img, txt, submit_button, uploaded_pdf, _ = create_demo()
 
 # Create PDFChatBot instance
 pdf_chatbot = PDFChatBot()
@@ -12,7 +12,7 @@ pdf_chatbot.load_tokenizer()
 # Set up event handlers
 with demo:
     # Event handler for uploading a PDF
-    uploaded_pdf.upload(pdf_chatbot.render_file, inputs=[uploaded_pdf,slider1], outputs=[show_img])
+    uploaded_pdf.upload(pdf_chatbot.render_file, inputs=[uploaded_pdf], outputs=[show_img])
 
     # Event handler for submitting text and generating response
     submit_button.click(pdf_chatbot.add_text, inputs=[chat_history, txt], outputs=[chat_history], queue=False).\
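Note: with the slider dropped both from the tuple returned by create_demo and from the upload handler's inputs, render_file now relies on its new default chunk_size. The submit chain is truncated above at the trailing backslash; the sketch below only illustrates how such a Gradio Blocks event chain is typically continued with .then(), and generate_response is an assumed method name, not code from this commit.

from interface import create_demo
from pdfchatbot import PDFChatBot

# Sketch of src/app.py after this commit; the .then(...) step is an assumption.
demo, chat_history, show_img, txt, submit_button, uploaded_pdf, _ = create_demo()
pdf_chatbot = PDFChatBot()

with demo:
    # Re-render the PDF preview whenever a new file is uploaded
    uploaded_pdf.upload(pdf_chatbot.render_file,
                        inputs=[uploaded_pdf],
                        outputs=[show_img])

    # Append the user's message, then (assumed) generate the streamed answer
    submit_button.click(pdf_chatbot.add_text,
                        inputs=[chat_history, txt],
                        outputs=[chat_history],
                        queue=False).then(
        pdf_chatbot.generate_response,            # assumed method name
        inputs=[chat_history, txt, uploaded_pdf],
        outputs=[chat_history, txt])

demo.queue()
demo.launch()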
src/interface.py CHANGED
@@ -11,7 +11,7 @@ def create_demo():
     with gr.Row():
         # Add sliders here
         with gr.Column():  # Adjust scale as needed
-            slider1 = gr.Slider(minimum=256, maximum=1024, value=50, label="Chunk Size")
+            slider1 = gr.Slider(minimum=256, maximum=1024, value=256, label="Chunk Size")
     with gr.Row():
         with gr.Column(scale=0.60):
             text_input = gr.Textbox(
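The previous default of 50 sat below the slider's own minimum of 256; this commit moves the default up to that minimum. For orientation, a minimal sketch of how create_demo could assemble and return the seven components unpacked in src/app.py; everything beyond the slider line is assumed layout, not code from this commit.

import gradio as gr

def create_demo():
    with gr.Blocks(title="PDF Chatbot") as demo:        # assumed container
        with gr.Row():
            chat_history = gr.Chatbot(label="Chat")     # assumed components
            show_img = gr.Image(label="PDF Preview")
        with gr.Row():
            # Add sliders here
            with gr.Column():  # Adjust scale as needed
                slider1 = gr.Slider(minimum=256, maximum=1024, value=256,
                                    label="Chunk Size")
        with gr.Row():
            with gr.Column(scale=0.60):
                text_input = gr.Textbox(show_label=False,
                                        placeholder="Ask a question about the PDF")
            submit_button = gr.Button("Submit")
            uploaded_pdf = gr.File(label="Upload PDF", file_types=[".pdf"])
    return demo, chat_history, show_img, text_input, submit_button, uploaded_pdf, slider1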
src/pdfchatbot.py CHANGED
@@ -95,7 +95,7 @@ class PDFChatBot:
         """
         Load the vector database from the documents and embeddings.
         """
-        text_splitter = CharacterTextSplitter(chunk_size=self.chunk_size, chunk_overlap=256)
+        text_splitter = CharacterTextSplitter(chunk_size=self.chunk_size, chunk_overlap=100)
         docs = text_splitter.split_documents(self.documents)
         self.vectordb = Chroma.from_documents(docs, self.embeddings)
 
@@ -178,13 +178,11 @@ class PDFChatBot:
 
         result = self.chain({"question": query, 'chat_history': self.chat_history}, return_only_outputs=True)
         self.chat_history.append((query, result["answer"]))
-        self.page = list(result['source_documents'][0])[1][1]['page']
-
         for char in result['answer']:
             history[-1][-1] += char
         return history, " "
 
-    def render_file(self, file,chunk_size):
+    def render_file(self, file,chunk_size=256):
         """
         Renders a specific page of a PDF file as an image.
 
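Two behavioral notes on src/pdfchatbot.py: the splitter's chunk_overlap drops from 256 (equal to the smallest selectable chunk size) to 100, and render_file no longer needs the slider because chunk_size now defaults to 256. A condensed sketch of the two touched methods follows; the method name load_vectordb and the PyMuPDF-based body of render_file are assumptions, since neither appears in the diff.

import fitz                                   # PyMuPDF (assumed renderer)
from PIL import Image
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma


class PDFChatBot:
    # embeddings, documents, chunk_size, page, chain, chat_history are
    # initialized elsewhere in the class (not shown in this diff)

    def load_vectordb(self):                  # assumed method name
        """
        Load the vector database from the documents and embeddings.
        """
        # Overlap lowered to 100 so it stays well below the smallest
        # selectable chunk size (the slider's minimum is 256).
        text_splitter = CharacterTextSplitter(chunk_size=self.chunk_size,
                                              chunk_overlap=100)
        docs = text_splitter.split_documents(self.documents)
        self.vectordb = Chroma.from_documents(docs, self.embeddings)

    def render_file(self, file, chunk_size=256):
        """
        Renders a specific page of a PDF file as an image.
        """
        # Assumed body: open the uploaded PDF and rasterize the current page.
        doc = fitz.open(file.name)
        pix = doc[self.page].get_pixmap(dpi=150)
        return Image.frombytes("RGB", (pix.width, pix.height), pix.samples)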