import streamlit as st
from PIL import Image
import textwrap
import google.generativeai as genai
import os
from PyPDF2 import PdfReader
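
# Expected third-party dependencies (a minimal sketch, assuming recent package
# versions; stdlib modules textwrap/os need no install):
#   pip install streamlit pillow google-generativeai PyPDF2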

# Function to display formatted Markdown text
def to_markdown(text):
    text = text.replace('•', ' *')
    return textwrap.indent(text, '> ', predicate=lambda _: True)
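
# Example: to_markdown("• hola") returns '>  * hola'; the bullet becomes a
# Markdown list marker and every line is prefixed with '> ' (a blockquote).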

# Function to generate content using the Gemini API
def generate_gemini_content(prompt, model_name='gemini-pro-vision', image=None):
    model = genai.GenerativeModel(model_name)
    if not image:
        st.warning("Por favor, agrega una imagen para usar el modelo gemini-pro-vision.")
        return None
    response = model.generate_content([prompt, image])
    return response

# Function to extract the text of a single PDF file
def extract_text_from_pdf(pdf_path):
    text = ""
    with open(pdf_path, 'rb') as file:
        pdf_reader = PdfReader(file)
        for page in pdf_reader.pages:
            # Guard against pages with no extractable text
            text += page.extract_text() or ""
    return text
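
# Note: PdfReader only recovers text embedded in the PDF; pages that are
# scanned images yield an empty string, so such files contribute nothing
# to the chatbot context built below.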

# Function to process all PDFs in a folder
def process_pdfs_in_folder(folder_path):
    pdf_texts = {}
    for filename in os.listdir(folder_path):
        if filename.endswith(".pdf"):
            pdf_path = os.path.join(folder_path, filename)
            text = extract_text_from_pdf(pdf_path)
            pdf_texts[filename] = text
    return pdf_texts

# Function to return a fallback answer when the model produces no usable output
def get_reference_response():
    # Replace this with logic that generates a response from reference information
    return "Lo siento, no puedo proporcionar una respuesta específica, pero aquí hay información de referencia relevante."

# Streamlit app
def main():
    st.set_page_config(page_title="MAX Chatbot - INIF", page_icon="🤖")

    # Configure the Gemini API key. Reading it from the environment avoids
    # committing a secret to the repo; GOOGLE_API_KEY is the variable that
    # google.generativeai checks by default.
    genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))

    st.title("MAX Chatbot - INIF")
    st.sidebar.title("Configuración de MAX Chatbot")

    # Select the Gemini model
    select_model = st.sidebar.selectbox("Selecciona el modelo", ["gemini-pro", "gemini-pro-vision"])

    # Initialize the chat session
    chat = genai.GenerativeModel(select_model).start_chat(history=[])
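
    # Caveat: Streamlit reruns this script on every interaction, so the chat
    # object (and its server-side history) is recreated each time; only the
    # message list kept in st.session_state below persists across turns.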

    # Helper to get a streamed response from the Gemini chat session
    def get_response(message):
        response = chat.send_message(message, stream=True)
        return response
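
    # Note: with stream=True, send_message returns an iterable response whose
    # chunks expose partial output via .text; the full reply is the
    # concatenation of the chunk texts (see the accumulation loop further down).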

    # Chat history
    if "messages" not in st.session_state:
        st.session_state["messages"] = []
    messages = st.session_state["messages"]

    # Collect the text of the PDFs in the script's working directory
    current_directory = os.getcwd()
    pdf_texts = process_pdfs_in_folder(current_directory)

    # Build the INIF context and incorporate the PDF contents into it
    inif_context = (
        "I am an informative data analyst chatbot named MAX, working for the National Institute of Fraud Research and Prevention (INIF), dedicated to fraud prevention and mitigation."
        " If you have questions related to fraud or prevention, feel free to ask. For inquiries about other topics, I'll redirect you to the fraud prevention context."
        "\n\nContact Information for INIF:"
        "\nPhone: +57 317 638 94 71"
        "\nEmail: atencionalcliente@inif.com.co"
        "\n\nOur Mission:"
        "\nTo be the most reliable engine of knowledge, research, and information in Colombia, capable of preventing and combating fraud through synergy between our team and companies."
        "\n\nOur Vision:"
        "\nTo lead the construction of a more honest culture, allowing us to progress as a society."
    )
    for pdf_name, pdf_text in pdf_texts.items():
        inif_context += f"\n\n**{pdf_name}**:\n{pdf_text}"
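
    # Caveat (design note, not implemented here): appending every PDF verbatim
    # can exceed the model's context window for large folders; a retrieval step
    # that selects only the relevant passages would scale better.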

    # Render the chat history
    if messages:
        for message in messages:
            role, parts = message["role"], message["parts"]
            if role.lower() == "user":
                st.markdown(f"Tú: {parts[0]}")
            elif role.lower() == "model":
                st.markdown(f"Assistant: {to_markdown(parts[0])}")

    # User input
    user_input = st.text_area("Tú:")

    # Append the INIF context to the user's input
    user_input_with_context = f"{user_input}\n\n{inif_context}"

    # Optional image input when the gemini-pro-vision model is selected
    image_file = None
    if select_model == 'gemini-pro-vision':
        image_file = st.file_uploader("Sube una imagen (si aplica):", type=["jpg", "jpeg", "png"])

        # Display the image if one was provided
        if image_file:
            st.image(image_file, caption="Imagen subida", use_column_width=True)

    # Send button; content is generated according to the selected model
    if st.button("Enviar / Generar Contenido"):
        if user_input:
            messages.append({"role": "user", "parts": [user_input]})
            if select_model == 'gemini-pro-vision':
                # gemini-pro-vision requires an image alongside the prompt
                if not image_file:
                    st.warning("Por favor, proporciona una imagen para el modelo gemini-pro-vision.")
                else:
                    image = Image.open(image_file)
                    response = generate_gemini_content(user_input_with_context, model_name=select_model, image=image)
                    if response:
                        if response.candidates:
                            parts = response.candidates[0].content.parts
                            generated_text = parts[0].text if parts else get_reference_response()
                            st.markdown(f"Assistant: {to_markdown(generated_text)}")
                            messages.append({"role": "model", "parts": [generated_text]})
                        else:
                            st.markdown(f"Assistant: {to_markdown(get_reference_response())}")
                            messages.append({"role": "model", "parts": [get_reference_response()]})
            else:
                # Other Gemini models: stream the reply and show it only once
                response = get_response(user_input_with_context)
                res_text = ""
                for chunk in response:
                    res_text += chunk.text
                if not res_text.strip():
                    st.markdown(f"Assistant: {to_markdown(get_reference_response())}")
                    messages.append({"role": "model", "parts": [get_reference_response()]})
                else:
                    st.markdown(f"Assistant: {to_markdown(res_text)}")
                    messages.append({"role": "model", "parts": [res_text]})

    # Persist the updated history in the Streamlit session
    st.session_state["messages"] = messages

if __name__ == "__main__":
    main()