INIFanalitica
committed on
Commit
•
6b2826a
1
Parent(s):
4a917de
Update app.py
Browse files
app.py
CHANGED
@@ -41,6 +41,11 @@ def process_pdfs_in_folder(folder_path):
|
|
41 |
pdf_texts[filename] = text
|
42 |
return pdf_texts
|
43 |
|
|
|
|
|
|
|
|
|
|
|
44 |
# Streamlit app
|
45 |
def main():
|
46 |
st.set_page_config(page_title="MAX Chatbot - INIF", page_icon="🤖")
|
@@ -130,14 +135,12 @@ def main():
|
|
130 |
if response:
|
131 |
if response.candidates:
|
132 |
parts = response.candidates[0].content.parts
|
133 |
-
generated_text = parts[0].text if parts else
|
134 |
st.markdown(f"Assistant: {to_markdown(generated_text)}")
|
135 |
messages.append({"role": "model", "parts": [generated_text]})
|
136 |
else:
|
137 |
-
|
138 |
-
|
139 |
-
st.markdown(f"Assistant: {to_markdown(inif_context)}")
|
140 |
-
messages.append({"role": "model", "parts": [inif_context]})
|
141 |
else:
|
142 |
# Otros modelos Gemini seleccionados
|
143 |
response = get_response(user_input_with_context)
|
@@ -146,11 +149,9 @@ def main():
|
|
146 |
res_text = ""
|
147 |
for chunk in response:
|
148 |
res_text += chunk.text
|
149 |
-
if not res_text:
|
150 |
-
|
151 |
-
|
152 |
-
st.markdown(f"Assistant: {to_markdown(inif_context)}")
|
153 |
-
messages.append({"role": "model", "parts": [inif_context]})
|
154 |
else:
|
155 |
st.markdown(f"Assistant: {to_markdown(res_text)}")
|
156 |
messages.append({"role": "model", "parts": [res_text]})
|
|
|
41 |
pdf_texts[filename] = text
|
42 |
return pdf_texts
|
43 |
|
44 |
+
# Function to get a response related to reference information
|
45 |
+
def get_reference_response():
|
46 |
+
# Replace this with logic to generate a response based on reference information
|
47 |
+
return "Lo siento, no puedo proporcionar una respuesta específica, pero aquí hay información de referencia relevante."
|
48 |
+
|
49 |
# Streamlit app
|
50 |
def main():
|
51 |
st.set_page_config(page_title="MAX Chatbot - INIF", page_icon="🤖")
|
|
|
135 |
if response:
|
136 |
if response.candidates:
|
137 |
parts = response.candidates[0].content.parts
|
138 |
+
generated_text = parts[0].text if parts else get_reference_response()
|
139 |
st.markdown(f"Assistant: {to_markdown(generated_text)}")
|
140 |
messages.append({"role": "model", "parts": [generated_text]})
|
141 |
else:
|
142 |
+
st.markdown(f"Assistant: {to_markdown(get_reference_response())}")
|
143 |
+
messages.append({"role": "model", "parts": [get_reference_response()]})
|
|
|
|
|
144 |
else:
|
145 |
# Otros modelos Gemini seleccionados
|
146 |
response = get_response(user_input_with_context)
|
|
|
149 |
res_text = ""
|
150 |
for chunk in response:
|
151 |
res_text += chunk.text
|
152 |
+
if not res_text.strip():
|
153 |
+
st.markdown(f"Assistant: {to_markdown(get_reference_response())}")
|
154 |
+
messages.append({"role": "model", "parts": [get_reference_response()]})
|
|
|
|
|
155 |
else:
|
156 |
st.markdown(f"Assistant: {to_markdown(res_text)}")
|
157 |
messages.append({"role": "model", "parts": [res_text]})
|