import streamlit as st
from PIL import Image
import textwrap
import google.generativeai as genai

# Format text as a Markdown blockquote, converting bullet characters to list items
def to_markdown(text):
    text = text.replace('•', '  *')
    return textwrap.indent(text, '> ', predicate=lambda _: True)
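
# For reference, an illustrative transformation (hypothetical input):
#   to_markdown("• Primer punto")  ->  ">   * Primer punto"
# (every line is prefixed with '> ' and bullets become Markdown list items)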

# Generate content with a Gemini vision model; an image is required
def generate_gemini_content(prompt, model_name='gemini-pro-vision', image=None):
    if image is None:
        st.warning("Por favor, agrega una imagen para usar el modelo gemini-pro-vision.")
        return None
    model = genai.GenerativeModel(model_name)

    response = model.generate_content([prompt, image])
    return response
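
# Illustrative usage (hypothetical file name; mirrors the flow in main() below):
#   img = Image.open("factura.jpg")
#   resp = generate_gemini_content("¿Qué muestra esta imagen?", image=img)
#   text = resp.candidates[0].content.parts[0].text if resp and resp.candidates else None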

# Streamlit app
def main():
    st.set_page_config(page_title="Laura Chatbot - INIF", page_icon="🤖")

    # Configure the Gemini API key (replace with your own; never commit real keys)
    genai.configure(api_key='YOUR_GEMINI_API_KEY')

    st.title("Laura Chatbot - INIF")
    st.sidebar.title("Configuración de Laura Chatbot")

    # Select the Gemini model
    select_model = st.sidebar.selectbox("Selecciona el modelo", ["gemini-pro", "gemini-pro-vision"])

    # Keep the chat session in session_state so Streamlit reruns don't erase it
    if st.session_state.get("chat_model") != select_model:
        st.session_state["chat"] = genai.GenerativeModel(select_model).start_chat(history=[])
        st.session_state["chat_model"] = select_model
    chat = st.session_state["chat"]

    # Send a message to the chat session and get a streamed response
    def get_response(messages):
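        # stream=True yields partial chunks; the caller assembles the full
        # reply by joining chunk.text pieces (see the send handler below)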
        response = chat.send_message(messages, stream=True)
        return response

    # Chat history shown in the UI, persisted across reruns
    if "messages" not in st.session_state:
        st.session_state["messages"] = []

    messages = st.session_state["messages"]
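    # Each history entry mirrors the Gemini content format, e.g.:
    #   {"role": "user", "parts": ["Hola"]}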

    # Render messages from the history
    for message in messages:
        role, parts = message["role"], message["parts"]
        if role.lower() == "user":
            st.markdown(f"Tú: {parts[0]}")
        elif role.lower() == "model":
            st.markdown(f"Assistant: {to_markdown(parts[0])}")

    # User input
    user_input = st.text_area("Tú:")

    # INIF context appended to every user prompt
    inif_context = (
        "I am an informative data analyst chatbot named TERMINATOR, working for the National Institute of Fraud Research and Prevention (INIF), dedicated to fraud prevention and mitigation."
        " If you have questions related to fraud or prevention, feel free to ask. For inquiries about other topics, I'll redirect you to the fraud prevention context."
        "\n\nContact Information for INIF:"
        "\nPhone: +57 317 638 94 71"
        "\nEmail: atencionalcliente@inif.com.co"
        "\n\nOur Mission:"
        "\nTo be the most reliable engine of knowledge, research, and information in Colombia, capable of preventing and combating fraud through synergy between our team and companies."
        "\n\nOur Vision:"
        "\nTo lead the construction of a more honest culture, allowing us to progress as a society."
    )

    # Concatenate the INIF context to the user's input
    user_input_with_context = f"{user_input}\n\n{inif_context}"

    # Optional image input when 'gemini-pro-vision' is selected
    image_file = None
    if select_model == 'gemini-pro-vision':
        image_file = st.file_uploader("Sube una imagen (si aplica):", type=["jpg", "jpeg", "png"])
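        # st.file_uploader returns a file-like UploadedFile, which PIL's
        # Image.open can read directly (used in the send handler below)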

        # Display image if provided
        if image_file:
            st.image(image_file, caption="Imagen subida", use_column_width=True)

    # Send / generate button; behavior depends on the selected model
    if st.button("Enviar / Generar Contenido"):
        if user_input:
            messages.append({"role": "user", "parts": [user_input]})
            if select_model == 'gemini-pro-vision':
                # gemini-pro-vision selected; an image is required
                if not image_file:
                    st.warning("Por favor, proporciona una imagen para el modelo gemini-pro-vision.")
                else:
                    image = Image.open(image_file)
                    response = generate_gemini_content(user_input_with_context, model_name=select_model, image=image)
                    if response:
                        if response.candidates:
                            parts = response.candidates[0].content.parts
                            generated_text = parts[0].text if parts else "No se generó contenido."
                            st.markdown(f"Assistant: {to_markdown(generated_text)}")
                            messages.append({"role": "model", "parts": [generated_text]})
                        else:
                            st.warning("No se encontraron candidatos en la respuesta.")
            else:
                # Text-only Gemini model selected
                response = get_response(user_input_with_context)

                # Accumulate the streamed chunks and render the reply once
                res_text = ""
                for chunk in response:
                    res_text += chunk.text
                st.markdown(f"Assistant: {to_markdown(res_text)}")
                messages.append({"role": "model", "parts": [res_text]})

    # Persist the updated message history in the Streamlit session
    st.session_state["messages"] = messages

if __name__ == "__main__":
    main()
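
# To launch the app locally (assuming this file is saved as app.py):
#   streamlit run app.py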