Chan-Y committed on
Commit
6d45118
1 Parent(s): b14adfd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -7
app.py CHANGED
@@ -9,8 +9,11 @@ from docx import Document as DocxDocument
9
  import fitz
10
  import os
11
  import torch
12
- os.environ['CUDA_LAUNCH_BLOCKING']="1"
13
- os.environ['PYTORCH_USE_CUDA_DSA'] = "1"
 
 
 
14
  lm_list = {
15
  "google/gemma-2-9b-it": "google/gemma-2-9b-it",
16
  "mistralai/Mistral-7B-Instruct-v0.3": "mistralai/Mistral-7B-Instruct-v0.3"
@@ -60,13 +63,17 @@ def handle_file_upload(file, llm_name):
60
  index = VectorStoreIndex.from_documents(
61
  documents, transformations=[text_splitter], embed_model=Settings.embed_model
62
  )
63
-
64
- return index.as_query_engine()
 
 
 
 
 
65
 
66
  def document_qa(file_upload, llm_choice, question_input):
67
- query_engine = handle_file_upload(file_upload, llm_choice)
68
- result = query_engine.query(question_input)
69
- return str(result)
70
 
71
 
72
  llm_choice = gr.Dropdown(choices=list(lm_list.values()), label="Choose LLM")
 
9
  import fitz
10
  import os
11
  import torch
12
+ from HybridRetriever import HybridRetriever
13
+ from ChatEngine import ChatEngine
14
+ from llama_index.retrievers.bm25 import BM25Retriever
15
+ from llama_index.core.retrievers import VectorIndexRetriever
16
+
17
  lm_list = {
18
  "google/gemma-2-9b-it": "google/gemma-2-9b-it",
19
  "mistralai/Mistral-7B-Instruct-v0.3": "mistralai/Mistral-7B-Instruct-v0.3"
 
63
  index = VectorStoreIndex.from_documents(
64
  documents, transformations=[text_splitter], embed_model=Settings.embed_model
65
  )
66
+
67
+ bm25_retriever = BM25Retriever(nodes=documents, similarity_top_k=2, tokenizer=text_splitter.split_text)
68
+ vector_retriever = VectorIndexRetriever(index=vector_index, similarity_top_k=2)
69
+ hybrid_retriever = HybridRetriever(bm25_retriever=bm25_retriever, vector_retriever=vector_retriever)
70
+ chat_engine = ChatEngine(hybrid_retriever)
71
+ response = chat_engine.ask_question(question, llm)
72
+ return response
73
 
74
  def document_qa(file_upload, llm_choice, question_input):
75
+ response = handle_file_upload(file_upload, llm_choice)
76
+ return response
 
77
 
78
 
79
  llm_choice = gr.Dropdown(choices=list(lm_list.values()), label="Choose LLM")