Update app.py
app.py (CHANGED)
@@ -1,16 +1,16 @@
 # ---------------------------------------------------------------------------------
 # Main application to load the model, generate prompts and explain the data
 # ---------------------------------------------------------------------------------
-
 import streamlit as st  # type: ignore
 import os
 import re
 import pandas as pd  # type: ignore
-from dotenv import load_dotenv  # type: ignore
+from dotenv import load_dotenv  # type: ignore
+# For local changes
 from supabase import create_client, Client  # type: ignore
-
 # from pandasai import SmartDataframe  # type: ignore
-from pandasai import SmartDatalake  # type: ignore
+from pandasai import SmartDatalake  # type: ignore
+# Because we already use more than one df (more than one table from our db)
 from pandasai.llm.local_llm import LocalLLM  # type: ignore
 from pandasai import Agent
 import plotly.graph_objects as go
@@ -21,13 +21,10 @@ import time
 # Helper functions
 # ---------------------------------------------------------------------------------

-
 def generate_graph_prompt(user_query):
     prompt = f"""
     You are a senior data scientist analyzing European labor force data.
-
     Given the user's request: "{user_query}"
-
     1. Plot the relevant data using graph_objects plotly:
     - Use `df.query("geo == 'X'")` to filter the country, instead of chained comparisons.
     - Avoid using filters like `df[df['geo'] == 'Germany']`.
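Note on the two filtering styles the prompt contrasts: both lines below are equivalent, but the query() form tends to survive LLM code generation with fewer quoting mistakes. A minimal illustration only, assuming a DataFrame with a `geo` column like the tables loaded later:

    import pandas as pd

    df = pd.DataFrame({"geo": ["Germany", "France"], "value": [42.0, 39.5]})
    subset = df.query("geo == 'Germany'")  # the form the prompt asks the model to use
    # subset = df[df["geo"] == "Germany"]  # the boolean-mask form it is told to avoid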
@@ -57,11 +54,9 @@ def generate_graph_prompt(user_query):
     return prompt

 # TODO: Keep improving the prompt
-
 # ---------------------------------------------------------------------------------
 # Supabase connection configuration
 # ---------------------------------------------------------------------------------
-
 # Load environment variables from the .env file
 load_dotenv()

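The client construction itself sits outside this hunk. With supabase-py it typically follows the pattern sketched below; the environment variable names here are assumptions for illustration, not taken from the diff:

    import os
    from dotenv import load_dotenv
    from supabase import create_client, Client

    load_dotenv()  # pull credentials from the local .env file
    url = os.environ.get("SUPABASE_URL")  # hypothetical variable name
    key = os.environ.get("SUPABASE_KEY")  # hypothetical variable name
    supabase: Client = create_client(url, key) if url and key else None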
@@ -94,7 +89,6 @@ def load_data(table):
         else:
             st.info("Response object does not have 'data' or known error attributes. Check the logs.")
             return pd.DataFrame()
-
     else:
         st.error("Supabase client not initialized. Check environment variables.")
         return pd.DataFrame()
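Only the error branches of load_data appear in this hunk. Assuming the usual supabase-py pattern, the unshown happy path presumably fetches the whole table and wraps response.data in a DataFrame, roughly as below (error handling from the hunk elided):

    def load_data(table):
        if supabase:
            response = supabase.table(table).select("*").execute()
            if hasattr(response, "data"):
                return pd.DataFrame(response.data)  # rows arrive as a list of dicts
        return pd.DataFrame()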
@@ -112,7 +106,7 @@ def load_data(table):
 labor_data = load_data("labor")
 fertility_data = load_data("fertility")
 # population_data = load_data("population")
-# predictions_data = load_data("predictions")
+# predictions_data = load_data("predictions")

 # TODO: Find a way to reduce latency (lots of data = high latency)

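On the latency TODO above: one low-effort option is Streamlit's data cache, so each table is fetched from Supabase at most once per TTL window instead of on every rerun. A suggestion sketch, not part of this commit:

    @st.cache_data(ttl=3600)  # keep each table for an hour across reruns
    def load_data_cached(table: str) -> pd.DataFrame:
        return load_data(table)

    labor_data = load_data_cached("labor")
    fertility_data = load_data_cached("fertility")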
@@ -120,25 +114,24 @@ fertility_data = load_data("fertility")
 # Initialize the LLM from Ollama with PandasAI
 # ---------------------------------------------------------------------------------

-# ollama_llm = LocalLLM(api_base="http://localhost:11434/v1",
+# ollama_llm = LocalLLM(api_base="http://localhost:11434/v1",
 #                       model="gemma3:12b",
-#                       temperature=0.1,
+#                       temperature=0.1,
 #                       max_tokens=8000)

 lm_studio_llm = LocalLLM(api_base="http://localhost:1234/v1")  # the model is gemma-3-12b-it-qat
-
 # sdl = SmartDatalake([labor_data, fertility_data, population_data, predictions_data], config={"llm": ollama_llm})  # PandasAI-ready DataFrame.
 # sdl = SmartDatalake([labor_data, fertility_data], config={"llm": ollama_llm})

 # agent = Agent([labor_data], config={"llm": lm_studio_llm})  # TODO: Try Agent with multiple dfs
 agent = Agent(
     [
-        labor_data,
+        labor_data,
         fertility_data
-    ],
+    ],
     config={
         "llm": lm_studio_llm,
-        "enable_cache": False,
+        "enable_cache": False,
         "enable_filter_extraction": False  # avoids parsing errors
     }
 )
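For orientation, the Agent assembled here is what the new chat loop below drives; a minimal call looks like:

    answer = agent.chat(generate_graph_prompt("labor force trend in Spain"))

With enable_cache set to False every prompt re-runs the full pipeline, which suits development; enable_filter_extraction is switched off to sidestep the parsing errors the inline comment mentions.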
@@ -147,48 +140,60 @@ agent = Agent(
 # Streamlit app configuration
 # ---------------------------------------------------------------------------------

-
-st.title("
[old lines 152-194 were also removed by this commit; their content is not shown in this rendering]
+st.set_page_config(page_title="GraphGen", page_icon="🇪🇺")
+st.title("_Europe GraphGen_ :blue[Graph generator] :flag-eu:")
+st.caption("Mapping Europe's data: Your tool for custom demographic charts")
+
+if "messages" not in st.session_state:
+    st.session_state.messages = []
+    st.session_state.messages.append({"role": "assistant", "content": "What graphic do you have in mind?"})
+
+for message in st.session_state.messages:
+    with st.chat_message(message["role"]):
+        st.markdown(message["content"])
+
+prompt = st.chat_input("Type your message here...", key="chat_input_bottom")
+
+if prompt:
+    st.session_state.messages.append({"role": "user", "content": prompt})
+    with st.chat_message("user"):
+        st.markdown(prompt)
+
+    with st.chat_message("assistant"):
+        with st.spinner('Generating answer...'):
+            try:
+                print(f"\nGenerating prompt...\n")
+                graph_prompt = generate_graph_prompt(prompt)
+                print(f"\nPrompt generated: {graph_prompt}\n")
+
+                start_time = time.time()
+                answer = agent.chat(graph_prompt)
+                print(f"\nAnswer type: {type(answer)}\n")  # Check the object type
+                print(f"\nAnswer content: {answer}\n")  # Inspect the response content
+                print(f"\nFull result: {agent.last_result}\n")
+
+                full_result = agent.last_result
+                explanation = full_result.get("explanation", "")
+
+                elapsed_time = time.time() - start_time
+                print(f"\nExecution time: {elapsed_time:.2f} seconds\n")
+
+                if isinstance(answer, str) and os.path.isfile(answer):
+                    # If the output is a valid path to an image
+                    im = plt.imread(answer)
+                    st.image(im)
+                    os.remove(answer)  # Clean up the temporary file
+
+                    if explanation:
+                        st.markdown(f"**Explanation:** {explanation}")
+                else:
+                    # If it is not a valid path, show it as text
+                    st.markdown(str(answer))
+
+            except Exception as e:
+                st.error(f"Error generating answer: {e}")
+
+if st.button("Clear chat"):
+    st.session_state.messages = []
+    st.session_state.messages.append({"role": "assistant", "content": "Chat has been cleared. What graphic do you have in mind now?"})
+    st.rerun()
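A possible follow-up simplification: st.image also accepts a file path directly, so the plt.imread round trip (which assumes matplotlib is imported somewhere in the lines this diff does not show) could be dropped. A sketch, not part of this commit:

    if isinstance(answer, str) and os.path.isfile(answer):
        st.image(answer)   # Streamlit loads the saved chart from disk itself
        os.remove(answer)  # still clean up the temporary file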