Update src/pdfchatbot.py
src/pdfchatbot.py  (+4 -4)  CHANGED
@@ -57,11 +57,11 @@ class PDFChatBot:
         docs = text_splitter.split_documents(self.documents)
         self.vectordb = Chroma.from_documents(docs, self.embeddings)
         print("Vector store created")
-    @spaces.GPU
+    @spaces.GPU(duration=120)
     def load_tokenizer(self):
         self.tokenizer = AutoTokenizer.from_pretrained("gradientai/Llama-3-8B-Instruct-Gradient-1048k")
 
-    @spaces.GPU
+    @spaces.GPU(duration=120)
     def create_organic_pipeline(self):
         self.pipe = pipeline(
             "text-generation",
@@ -77,7 +77,7 @@
         self.current_context = context
         print("Context Ready")
         print(self.current_context)
-    @spaces.GPU
+    @spaces.GPU(duration=120)
     def create_organic_response(self, history, query):
         self.get_organic_context(query)
         """
@@ -121,7 +121,7 @@
         self.load_vectordb()
         self.create_organic_pipeline()
         #self.create_chain()
-    @spaces.GPU
+    @spaces.GPU(duration=120)
     def generate_response(self, history, query, file,chunk_size,chunk_overlap_percentage,model_temperature,max_chunks_in_context):
 
         self.chunk_size = chunk_size
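For context, a minimal sketch of the pattern this commit adjusts, assuming a Hugging Face ZeroGPU Space with the `spaces` package available. The `@spaces.GPU` decorator attaches a GPU only while the decorated call runs; passing `duration=120` requests a longer (120-second) allocation than the default so that slow model loads and long generations are less likely to be cut off. Only the model id is taken from the diff; the function names, generation settings, and global-pipeline layout below are illustrative, not the repository's actual structure.

# Sketch only: ZeroGPU decorator with an explicit duration, mirroring the
# @spaces.GPU(duration=120) change made in this commit.
import spaces
import torch
from transformers import pipeline

MODEL_ID = "gradientai/Llama-3-8B-Instruct-Gradient-1048k"  # from the diff
pipe = None  # created lazily inside a GPU-decorated call


@spaces.GPU(duration=120)  # GPU is attached only for this call, up to ~120 s
def create_pipeline():
    global pipe
    pipe = pipeline(
        "text-generation",
        model=MODEL_ID,
        torch_dtype=torch.bfloat16,  # illustrative dtype choice
        device_map="auto",
    )


@spaces.GPU(duration=120)
def generate(prompt: str) -> str:
    if pipe is None:
        create_pipeline()
    return pipe(prompt, max_new_tokens=256)[0]["generated_text"]

The design trade-off is simple: a larger `duration` reduces the risk of the allocation expiring mid-generation for an 8B-parameter model, at the cost of holding the shared ZeroGPU quota for longer per call.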