Upload 2 files
- app.py  +5 -18
- requirements.txt  +5 -5
app.py
CHANGED
@@ -91,9 +91,9 @@ def predict(user_input,company):
     }
 
     filter = "dataset/"+company+"-10-k-2023.pdf"
-
+
     # Create context_for_query
-    relevant_document_chunks =
+    relevant_document_chunks = reportsdb.similarity_search(user_question, k=5, filter = {"source":filter})
     context_list = [d.page_content for d in relevant_document_chunks]
     context_for_query = ". ".join(context_list)
 
@@ -123,19 +123,6 @@ def predict(user_input,company):
 
 
     # Get response from the LLM
-def predict_output (user_input,company):
-    if company == "AWS":
-        company = "AWS"
-    elif company == "IBM":
-        company = "IBM"
-    elif company == "Google":
-        company = "Google"
-    elif company == "Msft":
-        company = "Msft"
-    elif company == "Meta":
-        company = "Meta"
-    model_output = predict(user_input,company)
-    return model_output
 
 # While the prediction is made, log both the inputs and outputs to a local log file
 # While writing to the log file, ensure that the commit scheduler is locked to avoid parallel
@@ -161,7 +148,7 @@ company_input = gr.Radio(
     label = 'company'
 )
 
-model_output = gr.
+model_output = gr.Text (label = 'Response')
 
 # Add text box and radio button to the interface
 # The radio button is used to select the company 10k report in which the context needs to be retrieved.
@@ -173,8 +160,8 @@ model_output = gr.Textbox (label = 'Response')
 
 demo = gr.Interface(
     fn=predict_output,
-    inputs=[user_input,
-    outputs=
+    inputs=[user_input,company],
+    outputs=prediction,
     title="RAG on 10k-reports",
     description="This API allows you to query on annaul reports",
     concurrency_limit=16
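The first hunk completes the retrieval step: chunks are fetched from the vector store with a metadata filter so only the selected company's 10-K is searched, then joined into a single context string. A minimal sketch of that step, assuming the store is a Chroma collection built with langchain-community and sentence-transformers embeddings (the store name reportsdb, the persist directory, the embedding model, and the build_context helper are illustrative, not the app's actual setup):

# Sketch only: assumes a persisted Chroma collection of 10-K chunks whose
# "source" metadata holds the originating PDF path, as in the diff above.
from langchain_community.vectorstores import Chroma
from langchain_community.embeddings import SentenceTransformerEmbeddings

embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
reportsdb = Chroma(persist_directory="reports_db", embedding_function=embeddings)

def build_context(user_question: str, company: str) -> str:
    # Restrict the search to the chosen company's report via the metadata filter
    filter = "dataset/" + company + "-10-k-2023.pdf"
    relevant_document_chunks = reportsdb.similarity_search(
        user_question, k=5, filter={"source": filter}
    )
    # Join the top-k chunks into one context string for the prompt
    context_list = [d.page_content for d in relevant_document_chunks]
    return ". ".join(context_list)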
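The comments retained at the end of the second hunk describe the logging path: each request and response is appended to a local log file, and the write happens under the commit scheduler's lock so a background push cannot run against a half-written file. A hedged sketch of that pattern using huggingface_hub.CommitScheduler (the repo id, folder layout, and log_prediction helper are placeholders; huggingface_hub is assumed to be available, as it typically is on Spaces):

# Sketch only: repo_id, file names and the helper below are illustrative.
import json
from pathlib import Path
from huggingface_hub import CommitScheduler

log_folder = Path("logs")
log_folder.mkdir(exist_ok=True)
log_file = log_folder / "queries.json"

# Periodically pushes the logs folder to a dataset repo in the background
scheduler = CommitScheduler(
    repo_id="your-username/rag-10k-logs",
    repo_type="dataset",
    folder_path=log_folder,
    path_in_repo="data",
)

def log_prediction(user_input: str, company: str, model_output: str) -> None:
    # Hold the scheduler lock so a scheduled commit cannot run mid-write
    with scheduler.lock:
        with log_file.open("a") as f:
            f.write(json.dumps({
                "user_input": user_input,
                "company": company,
                "output": model_output,
            }))
            f.write("\n")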
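The last two hunks wire up the Gradio UI: the response component becomes gr.Text(label='Response') and gr.Interface is handed concrete inputs and outputs. A minimal, self-contained sketch of that wiring with a stubbed predict_output (component names follow the hunk context; the real callback runs the RAG pipeline, and the sketch points outputs at the response component, since the prediction name used in the diff is not defined in the visible context):

# Sketch only: predict_output is a stub; in the app it retrieves context from
# the selected company's 10-K and queries the LLM.
import gradio as gr

def predict_output(user_input, company):
    return f"(answer for {company}) {user_input}"

user_input = gr.Textbox(label="Question")
company_input = gr.Radio(
    ["AWS", "IBM", "Google", "Msft", "Meta"],
    label="company",
)
model_output = gr.Text(label="Response")

demo = gr.Interface(
    fn=predict_output,
    inputs=[user_input, company_input],
    outputs=model_output,
    title="RAG on 10k-reports",
    description="This API allows you to query on annual reports",
    concurrency_limit=16,
)

if __name__ == "__main__":
    demo.launch()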
requirements.txt
CHANGED
@@ -1,5 +1,5 @@
-openai==1.23.2
-langchain==0.1.9
-langchain-community==0.0.32
-chromadb==0.4.22
-sentence-transformers==2.3.1
+openai==1.23.2
+langchain==0.1.9
+langchain-community==0.0.32
+chromadb==0.4.22
+sentence-transformers==2.3.1