coder
committed on
Commit
•
7245dc6
1
Parent(s):
8f01c07
first commit
Browse files- Home.py +221 -0
- core/controllers/main_process.py +43 -0
- core/controllers/pages_controller.py +51 -0
- core/estilos/home.css +76 -0
- core/estilos/main.css +4 -0
- requirements.txt +9 -0
Home.py
ADDED
@@ -0,0 +1,221 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from core.controllers.pages_controller import Page
|
2 |
+
from core.controllers.main_process import Generador
|
3 |
+
|
4 |
+
|
5 |
+
class Home(Page):
    """Landing page: zero-shot image segmentation driven by text prompts.

    The user loads an image (file upload or camera), optionally overrides the
    model/tokenizer pair, enters comma-separated prompts, and gets one
    predicted mask per prompt rendered next to the source image.
    """

    # Defaults seeded into st.session_state on first load (see Page.init_globals).
    variables_globales = {
        "prompt": "cat,fish,dog",
        "img_bytes": None,
        "img_src": None,        # raw uploaded file / camera capture (UploadedFile)
        "settings": {
            "model": "facebook/detr-resnet-50",
            "tokenizer": "facebook/detr-resnet-50",
        },
        "img_output": None,
        "predicciones": None,   # list of numpy heat-maps produced by Generador
        "fuentes": [
            # Fixed typo: "Papper" -> "Paper".
            {"titulo": "Paper",
             "url": "https://arxiv.org/abs/2112.10003"},
            {"titulo": "Transformers",
             "url": "https://huggingface.co/docs/transformers/v4.32.1/en/index"},
            {"titulo": "Modelo CIDAS/ClipSeg-rd64-refined",
             "url": "https://huggingface.co/CIDAS/clipseg-rd64-refined"},
            # NOTE(review): same URL as the model card above — this entry is
            # labelled "Repositorio" and probably should point at the project
            # repository; confirm before changing.
            {"titulo": "Repositorio",
             "url": "https://huggingface.co/CIDAS/clipseg-rd64-refined"},
            {"titulo": "Material Apoyo",
             "url": "https://github.com/NielsRogge/Transformers-Tutorials/blob/master/CLIPSeg/Zero_shot_image_segmentation_with_CLIPSeg.ipynb"},
            {"titulo": "IO",
             "url": "https://pypi.org/project/io/"},
            {"titulo": "PIL",
             "url": "https://pypi.org/project/Pillow/"},
            {"titulo": "requests",
             "url": "https://pypi.org/project/requests/"},
            {"titulo": "timm",
             "url": "https://pypi.org/project/timm/"},
            {"titulo": "matplotlib",
             "url": "https://pypi.org/project/matplotlib/"}]
    }

    # Stylesheet basenames resolved as core/estilos/<name>.css by Page.cargar_css.
    archivos_css = ["main",
                    "home"]

    def __init__(self, title="Bienvenido", icon="🖼️", init_page=False):
        """Create the controller; when *init_page* is True, render the page chrome.

        Args:
            title: page title shown in the browser tab and subheader.
            icon: page icon / emoji shown next to the title.
            init_page: when True, configure the page, seed globals, inject CSS
                and render the sidebar. Kept False by default so the class can
                be instantiated without touching streamlit state.
        """
        super().__init__()
        if init_page:
            self.new_page(title=title,
                          icon=icon)
            self.init_globals(globals=self.variables_globales)
            for archivo in self.archivos_css:
                self.cargar_css(archivo_css=archivo)
            self.about()

    def about(self):
        """Render the sidebar: topic summary plus the list of resources."""
        self.builder().sidebar.markdown(unsafe_allow_html=False,
                                        help=None,
                                        body="""
## Tema

La segmentación de imágenes es el proceso de dividir una imagen en segmentos o regiones significativas.

Puede ser de diferentes tipos, como la segmentación semántica (asignación de etiquetas de clase a píxeles) o la segmentación basada en contornos (división de una imagen en áreas definidas por bordes).

Esta técnica es ampliamente utilizada en visión por computadora para analizar y comprender imágenes.

## Recursos
""")
        # One bullet per resource entry declared in variables_globales["fuentes"].
        for fuente in self.get_global('fuentes'):
            self.builder().sidebar.markdown(
                unsafe_allow_html=False,
                help=None,
                body=f"""
* **{fuente.get('titulo')}** - {fuente.get('url')}
"""
            )
        self.builder().sidebar.markdown(
            unsafe_allow_html=False,
            help=None,
            body="""
###### Es **importante** mencionar que esta **compilación** se encuentra en proceso de **construcción**.

*Si deseas **participar**, eres **bienvenido** de aportar en el repositorio oficial:*

https://github.com/coder160/cuadernos/
""")

    def obtener_bytes(self, archivo):
        """Store the uploaded file / camera capture as the current source image."""
        self.set_global(key='img_src',
                        value=archivo)

    def actualizar_modelo_tokenizer(self, modelo, tokenizer):
        """Persist the user-selected model/tokenizer pair in session state."""
        self.set_global(key='settings',
                        value={'model': modelo,
                               'tokenizer': tokenizer})

    def procesar_imagen(self, prompt):
        """Run the segmentation pipeline on the loaded image and store the masks.

        Args:
            prompt: comma-separated search terms, one mask predicted per term.
        """
        # Guard: clicking "Detectar" before loading an image used to crash
        # inside PIL with an opaque AttributeError. Silently skip instead.
        if self.get_global('img_src') is None:
            return
        proceso = Generador(configuraciones=self.get_global('settings'))
        proceso.generar_prediccion(
            imagen_bytes=self.imgg.open(
                self.get_global('img_src')).convert("RGB"),
            new_prompt=prompt
        )
        self.set_global(key='predicciones', value=proceso.prediccion)

    def expander_instrucciones(self, placeholder):
        """Render the collapsed "Instrucciones" expander inside *placeholder*."""
        instrucciones = placeholder.expander(expanded=False,
                                             label="Instrucciones")
        instrucciones.markdown(unsafe_allow_html=False,
                               help=None,
                               body="""
1. **Cargue su Imagen Base**:

Elija cualquiera de las dos opciones para cargar su imagen:

* **Desde su Galería**: cargue la imagen desde la galería de su teléfono o computadora.

* **Desde su Cámara**: cargue la imagen directamente desde la cámara de su teléfono o computadora.


2. **Detectar / Predecir**:

Realice la **detección de objetos** con base a las predicciones realizadas por el **modelo** pre-entrenado seleccionado.

A partir del dataset con el que fue pre-entrenado el modelo, tratará de predecir cuales son los objetos en la imagen cargada.


* **Configuraciones Avanzadas**:

*Elija un modelo y procesador de la lista disponible, o elija uno directamente de la base de modelos disponible en HuggingFace.*
""")

    def expander_imagen_base(self, placeholder):
        """Render both image-loading paths (gallery upload and camera capture)."""
        imagen_base = placeholder.container()
        imagen_base.markdown(unsafe_allow_html=False,
                             help=None,
                             body="""
**Cargue su Imagen Base**:
""")
        archivo_expander = imagen_base.expander(expanded=False,
                                                label="Desde su Galería")
        _archivo = archivo_expander.file_uploader(label="Galería",
                                                  on_change=None,
                                                  accept_multiple_files=False,
                                                  label_visibility="visible")
        # Only persist the file when the button is pressed AND a file exists.
        if (archivo_expander.button(label="Cargar Archivo", type="secondary", use_container_width=True,
                                    help="Suba un archivo.") and _archivo is not None):
            self.obtener_bytes(_archivo)

        camara_expander = imagen_base.expander(expanded=False,
                                               label="Desde su Cámara")
        _captura = camara_expander.camera_input(label="Cámara",
                                                on_change=None,
                                                label_visibility="visible")
        if (camara_expander.button(label="Cargar Captura", type="secondary", use_container_width=True,
                                   help="Tome una fotografía.") and _captura is not None):
            self.obtener_bytes(_captura)

    def expander_configuraciones(self, placeholder):
        """Render the advanced-settings expander (model / tokenizer override)."""
        configuraciones = placeholder.expander(
            expanded=False, label="Configuraciones Avanzadas")
        modelo = configuraciones.text_input(
            label="MODELO", on_change=None, label_visibility="visible",
            value=self.get_global('settings').get('model'))
        tokenizer = configuraciones.text_input(
            label="TOKENIZER", on_change=None, label_visibility="visible",
            value=self.get_global('settings').get('tokenizer'))

        if configuraciones.button(label="Configurar", type="secondary", use_container_width=True,
                                  help="Actualice configuraciones"):
            self.actualizar_modelo_tokenizer(modelo, tokenizer)

    def resultados(self, placeholder):
        """Render the source image and, if present, one image per prediction."""
        resultados = placeholder.container()

        if self.get_global('img_src', None) is not None:
            resultados.image(
                image=self.get_global('img_src').getvalue(),
                caption="Su resultado",
                use_column_width="auto",
                channels="RGB",
                output_format="auto"
            )
        if self.get_global('predicciones', None) is not None:
            for i, predict in enumerate(self.get_global('predicciones', [])):
                resultados.image(
                    image=predict,
                    caption=f"Su predicción {i}",
                    use_column_width="auto",
                    channels="RGB",
                    output_format="auto"
                )

    def agregar_card_base(self, columna):
        """Build the main card: inputs column (30%) and outputs column (70%)."""
        card_principal = columna.container()

        columna_inputs, columna_outputs = card_principal.columns(
            [0.3, 0.7], gap="small")

        self.expander_instrucciones(columna_inputs)
        self.expander_imagen_base(columna_inputs)
        self.expander_configuraciones(columna_inputs)
        columna_inputs.markdown(unsafe_allow_html=False,
                                help=None,
                                body="""
**Introduzca palabras de búsqueda**:
""")
        prompt = columna_inputs.text_input(
            label="Prompt", on_change=None, label_visibility="visible",
            value=self.get_global('prompt'))
        if columna_inputs.button(label="Detectar / Predecir", help="Realizar Predicciones",
                                 type="secondary", use_container_width=True):
            self.procesar_imagen(prompt)
        self.resultados(columna_outputs)

    def build(self):
        """Entry point: lay out the single-column page body."""
        columna_principal = self.get_body().columns(1, gap="small")[0]
        self.agregar_card_base(columna_principal)
218 |
+
|
219 |
+
|
220 |
+
# Script entry point: build the page only when run directly (e.g. by
# `streamlit run Home.py`), not when the module is imported.
if __name__ == "__main__":
    Home(init_page=True).build()
core/controllers/main_process.py
ADDED
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from transformers import AutoProcessor, CLIPSegForImageSegmentation
|
2 |
+
from io import BytesIO
|
3 |
+
from PIL import Image
|
4 |
+
import torch
|
5 |
+
import requests
|
6 |
+
import matplotlib.pyplot as plt
|
7 |
+
|
8 |
+
|
9 |
+
class Generador():
    """Runs zero-shot image segmentation (CLIPSeg) for a set of text prompts."""

    def __init__(self, configuraciones):
        """Store model configuration.

        Args:
            configuraciones: dict with 'model' and 'tokenizer' Hugging Face
                hub identifiers (e.g. "CIDAS/clipseg-rd64-refined").
        """
        self.modelo = configuraciones.get('model')
        self.tokenizer = configuraciones.get('tokenizer')

    def generar_prediccion(self, imagen_bytes, new_prompt):
        """Predict one sigmoid heat-map (numpy array) per comma-separated prompt.

        Args:
            imagen_bytes: a PIL image (already converted to RGB by the caller).
            new_prompt: comma-separated prompts, e.g. "cat,fish,dog".

        Always sets ``self.prediccion`` — a possibly-empty list when the
        pipeline fails (errors are logged, not raised).
        """
        respuestas = []
        try:
            # Instantiate processor and model (downloads weights on first use).
            procesador = AutoProcessor.from_pretrained(self.tokenizer)
            modelo = CLIPSegForImageSegmentation.from_pretrained(self.modelo)
            # One (text, image) pair per prompt.
            prompts = new_prompt.split(',')
            inputs = procesador(text=prompts, images=[imagen_bytes] * len(prompts), padding=True, return_tensors="pt")
            # Inference only: skip autograd bookkeeping.
            with torch.no_grad():
                outputs = modelo(**inputs)
            logits = outputs.logits
            # A single prompt yields a 2-D (H, W) tensor; normalize to (N, H, W).
            # (The previous matplotlib-based loop crashed for one prompt because
            # plt.subplots(1, 1) returns a bare Axes with no .flatten(), and the
            # broad except below silently produced zero predictions. It also
            # leaked one never-closed figure per call; the figure was unused
            # since the imshow calls were commented out.)
            if logits.dim() == 2:
                logits = logits.unsqueeze(0)
            for mapa in logits:
                respuestas.append(torch.sigmoid(mapa).detach().numpy())

        except Exception as error:
            print(f"No es Chems\n{error}")
        finally:
            # Expose results even when something failed upstream.
            self.prediccion = respuestas
core/controllers/pages_controller.py
ADDED
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as BaseBuilder
|
2 |
+
from PIL import Image
|
3 |
+
import json
|
4 |
+
|
5 |
+
|
6 |
+
class Page():
    """Base page controller: thin facade over streamlit plus session-state helpers."""

    def __init__(self):
        # Keep the streamlit module behind an accessor so subclasses use
        # self.builder() instead of importing streamlit themselves.
        self.__ = BaseBuilder
        # PIL.Image facade exposed to subclasses (e.g. Home.procesar_imagen).
        self.imgg = Image

    def builder(self):
        """Return the streamlit module used to build the UI."""
        return self.__

    def new_page(self, title: str, icon: str = "", color_divider: str = "rainbow"):
        """Configure the page and render the shared title/subheader chrome."""
        self.builder().set_page_config(page_title=title,
                                       page_icon=icon,
                                       layout="wide")
        # Plain string: the original used an f-string with no placeholders.
        self.builder().title("Clasificación de imágenes con Visión Artificial",
                             anchor="titulo-proyecto",
                             help=None)
        self.builder().subheader(f"{title} {icon}",
                                 anchor="titulo-pagina",
                                 divider=color_divider,
                                 help=None)
        # Placeholder the subclasses fill via get_body().
        self.__body = self.builder().empty()

    def get_body(self):
        """Return the main body placeholder created by new_page()."""
        return self.__body

    def init_globals(self, globals=None):
        """Seed session_state with defaults without clobbering existing keys.

        Args:
            globals: mapping of default key/value pairs. The parameter keeps
                its builtin-shadowing name for caller compatibility; the old
                mutable default (``dict({})``) is replaced by None.
        """
        for _k, _v in (globals or {}).items():
            if self.get_global(_k, None) is None:
                self.set_global(_k, _v)

    def set_global(self, key="", value=None):
        """Write *value* under *key* in st.session_state."""
        self.builder().session_state[key] = value

    def get_global(self, key="", default=None, is_secret=False):
        """Read *key* from st.secrets (when *is_secret*) or st.session_state."""
        fuente = self.builder().secrets if is_secret else self.builder().session_state
        return fuente.get(key, default)

    def cargar_css(self, archivo_css="default"):
        """Inject core/estilos/<archivo_css>.css into the page; log failures.

        Best-effort by design: a missing or unreadable stylesheet must not
        break the app, so errors are printed and swallowed.
        """
        ruta = f"core/estilos/{archivo_css}.css"
        try:
            with open(ruta, encoding="utf-8") as archivo:
                self.builder().markdown(
                    f'<style>{archivo.read()}</style>', unsafe_allow_html=True)
        except Exception as er:
            print(f"Error:\n{er}")
core/estilos/home.css
ADDED
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
/* Card styling */
[data-testid="stVerticalBlock"]>[style*="flex-direction: column;"]>[data-testid="stVerticalBlock"] {
    background-color: #070707;
    box-shadow: 1px 1px 2px 2px #000000d2;
    box-sizing: border-box;
    padding: 4px 0px 20px 0px;
    border-radius: 14px;
    backdrop-filter: blur(4px);
    /* Fixed: transition-property takes a comma-separated list; the original
       space-separated value (which also included a bogus "transition"
       keyword) was invalid and dropped by the browser, so every property
       was animating. The lone `transition: ease-out` shorthand is replaced
       by the explicit timing-function longhand. */
    transition-property: background-color, box-shadow;
    transition-timing-function: ease-out;
    transition-duration: 88ms;
}

/* Card styling on hover */
[data-testid="stVerticalBlock"]>[style*="flex-direction: column;"]>[data-testid="stVerticalBlock"]:hover {
    /* NOTE(review): "2px thin" is an invalid border value (two widths, no
       style) and is ignored by browsers; presumably a solid border was
       intended — confirm before changing the rendered look. */
    border: 2px thin;
    box-shadow: 2px 2px 3px 3px #000000fe;
    /* Same comma-list fix as above. */
    transition-property: box-shadow;
    transition-timing-function: ease-in;
    transition-duration: 110ms;
}


/* Inside card: title */
[data-testid="stVerticalBlock"]>[style*="flex-direction: column;"]>[data-testid="stVerticalBlock"]>[data-testid="element-container"]>.stHeadingContainer {
    text-align: center;
    align-self: center;
}

/* Inside card: text */
[data-testid="stVerticalBlock"]>[style*="flex-direction: column;"]>[data-testid="stVerticalBlock"]>[data-testid="element-container"]>.stTextLabelWrapper>[data-testid="stText"] {
    padding: 0px 8px 0px 8px;
    color: white;
}

/* Inside card: markdown */
[data-testid="stVerticalBlock"]>[style*="flex-direction: column;"]>[data-testid="stVerticalBlock"]>[data-testid="element-container"]>.stMarkdown>[data-testid="stMarkdownContainer"] {
    padding: 0px 8px 0px 8px;
    color: white;
}

/* Inside card: image row */
[data-testid="stVerticalBlock"]>[style*="flex-direction: column;"]>[data-testid="stVerticalBlock"]>[data-testid="stHorizontalBlock"] {
    padding: 0px 8px 0px 8px;
    display: flex;
}

/* Inside card: images */
[data-testid="stVerticalBlock"]>[style*="flex-direction: column;"]>[data-testid="stVerticalBlock"]>[data-testid="stHorizontalBlock"]>[data-testid="column"] {
    display: flex;
    justify-content: center;
    align-items: center;
}

/* Inside card: buttons */
[data-testid="stVerticalBlock"]>[style*="flex-direction: column;"]>[data-testid="stVerticalBlock"]>[data-testid="element-container"]>[data-testid="stButton"]>.stTooltipIcon>div>[data-testid="stTooltipIcon"]>[data-testid="tooltipHoverTarget"]>button {
    padding: 8px;
    display: flex;
    justify-content: center;
    text-align: center;
    width: 100%;
    background-color: crimson;
    color: white;
}


/* Inside card: expander */
[data-testid="stVerticalBlock"]>[style*="flex-direction: column;"]>[data-testid="stVerticalBlock"]>[data-testid="stExpander"]>ul {
    background-color: transparent;
    box-shadow: none;
    border: none;
}

/* Streamlit-generated utility class; forced white for dark-card contrast. */
.st-by {
    color: white !important;
}
core/estilos/main.css
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
/* Main application container: stretch the central block to fill the full
   viewport height so the page chrome has no dead space below it. */
section>[data-testid="block-container"]{
    height: 100vh;
}
requirements.txt
ADDED
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
streamlit
|
2 |
+
pandas
|
3 |
+
transformers
|
4 |
+
transformers[torch]
|
5 |
+
transformers[tf-cpu]
|
6 |
+
transformers[flax]
|
7 |
+
Pillow
|
8 |
+
requests
|
9 |
+
matplotlib
|