matiasbonfanti committed · Commit 1f3531d · Parent(s): 5dd7a53

Update app

Files changed:
- .gitattributes +3 -0
- Ambito2.jpg +0 -0
- Clarin2.jpg +3 -0
- Clarin3.jpg +3 -0
- Popular.jpg +3 -0
- app.py +109 -90
.gitattributes CHANGED
@@ -32,3 +32,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+Clarin2.jpg filter=lfs diff=lfs merge=lfs -text
+Clarin3.jpg filter=lfs diff=lfs merge=lfs -text
+Popular.jpg filter=lfs diff=lfs merge=lfs -text
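The three added patterns mark the new example images as Git LFS-tracked files, so the repository stores pointers while the image bytes live in LFS. As a rough illustration only (a hypothetical helper, not part of this commit), a small Python sketch that parses a .gitattributes file and lists which patterns carry the filter=lfs attribute:

# Hypothetical helper, not part of this Space: list the patterns in a
# .gitattributes file that are marked with filter=lfs.
from pathlib import Path

def lfs_patterns(path: str = ".gitattributes") -> list[str]:
    patterns = []
    for line in Path(path).read_text().splitlines():
        parts = line.split()
        # A .gitattributes line is "<pattern> <attr> <attr> ...".
        if parts and "filter=lfs" in parts[1:]:
            patterns.append(parts[0])
    return patterns

if __name__ == "__main__":
    # After this commit, the output should include Clarin2.jpg, Clarin3.jpg and Popular.jpg.
    print(lfs_patterns())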
Ambito2.jpg ADDED
Clarin2.jpg ADDED (Git LFS)
Clarin3.jpg ADDED (Git LFS)
Popular.jpg ADDED (Git LFS)
app.py CHANGED
@@ -1,119 +1,138 @@
 import gradio as gr
-import
-import
-
 import os
 
-
-
-
-
-
-
-
-
-
-headers = {
-    "accept": "application/json",
-    "Authorization": yelpkey
-    }
-
-response = requests.get(url, headers=headers)
-response = response.json()
-review = response['reviews'][0]['text']
-user = response['reviews'][0]['user']['name']
-image = response['reviews'][0]['user']['image_url']
-link = response['reviews'][0]['url']
-calificacion = response['reviews'][0]['rating']
-creacion = response['reviews'][0]['time_created']
-openai.api_key = openkey
-traductor = GoogleTranslator(source='en', target='es')
-traduccion = traductor.translate(review)
-response2 = openai.Completion.create(
-    model="text-davinci-003",
-    prompt=f"Responder la siguiente reseña. nombre:{user}. {review}",
-    temperature=0.5,
-    max_tokens=300,
-    top_p=1,
-    frequency_penalty=0,
-    presence_penalty=0
-)
-response2 = response2.choices[0].text
 
 
 
-
 
 
 
 
 with gr.Blocks() as demo:
     gr.Markdown(
         """
         <p align="center">
-        <img width = 600 src="https://
-
-        <img width = 400 src="https://raw.githubusercontent.com/BonfantiMatias/images/main/proyecto2.jpg">
-        </p>
-        <p align="center">
-        <img width=1024 src="https://raw.githubusercontent.com/BonfantiMatias/images/main/COMOFUNCIONA.png">
         </p>
         """
     )
-
-
-    <p align="center">
-    </p>
-    <p align="center">
-    </p>
-    """
-    )
-    gr.Markdown("# Ingresar el Negocio")
     gr.Markdown(
         """
-        - Puede
-        -
         """
     )
-    with gr.
-    seed = gr.
-    with gr.Row():
-
-
-
-
-
-
-
 
-    gr.Markdown("# Mediante la Api Yelp Fusion se obtienen los datos de la ultima reseña del local seleccionado")
-
-
-
     with gr.Row():
-
         with gr.Column():
-
-            review = gr.Textbox(label="Reseña Cliente")
-            calificacion = gr.Textbox(label="Calificacion de la reseña")
-            creacion = gr.Textbox(label="Fecha de creacion de la reseña")
         with gr.Column():
-
-
-
-
-    gr.Markdown("# Traduccion de la reseña al idioma español")
-    with gr.Row():
-        traduccion = gr.Textbox(label="Traduccion reseña")
 
-    gr.Markdown("# Respuesta de la reseña implementando el modelo GPT-3 de OpenAi")
     with gr.Row():
-
-
-
-        btn = gr.Button("Iniciar Modelo")
-        btn.click(generate_text, inputs=[seed], outputs=[review,user,image,response2,traduccion,calificacion,creacion])
-
-
 if __name__ == "__main__":
     demo.launch()
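Several of the removed lines are truncated in this rendering, but the recoverable parts show the previous app's flow: fetch the latest review of a business from the Yelp Fusion API, translate it to Spanish (presumably with deep_translator's GoogleTranslator), and draft a reply with OpenAI's text-davinci-003 via the legacy Completion endpoint. A minimal sketch of that flow, assuming url, yelpkey, and openkey were defined in lines that are no longer visible; the generate_text name comes from the old btn.click call, but its real signature is not recoverable, so the one below is illustrative only:

# Sketch of the removed Yelp + GPT-3 flow; url, yelpkey and openkey are assumed
# to have been defined in the truncated lines above.
import requests
import openai
from deep_translator import GoogleTranslator

def generate_text(url, yelpkey, openkey):
    headers = {"accept": "application/json", "Authorization": yelpkey}
    response = requests.get(url, headers=headers).json()
    review = response['reviews'][0]['text']
    user = response['reviews'][0]['user']['name']

    # Translate the English review to Spanish.
    traduccion = GoogleTranslator(source='en', target='es').translate(review)

    # Draft a reply with the legacy Completion endpoint the old code used.
    openai.api_key = openkey
    completion = openai.Completion.create(
        model="text-davinci-003",
        prompt=f"Responder la siguiente reseña. nombre:{user}. {review}",
        temperature=0.5,
        max_tokens=300,
    )
    return review, traduccion, completion.choices[0].text

The added lines below replace that flow entirely with a YOLO-based newspaper note-clipping pipeline.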
+
 import gradio as gr
+import cv2
+from matplotlib import pyplot as plt
+import numpy as np
+from typing import List
+from ultralytics import YOLO
 import os
+import shutil
 
+
+def recortar_notas(imagen_path: str) -> int:
+    if os.path.exists('deteccion'):
+        shutil.rmtree('deteccion')
+    if os.path.exists('diarios'):
+        shutil.rmtree('diarios')
+    if os.path.exists('recorte'):
+        shutil.rmtree('recorte')
 
+    model = YOLO("seg-640.pt")
+    results = model.predict(source=imagen_path, save=True, save_txt=True, project="diarios", classes=0, conf=0.5)
+
+    # load the input image
+    image = cv2.imread(imagen_path)
+
+    # create a folder to store the crops
+    filename = os.path.splitext(os.path.basename(results[0].path))[0]
+    foldername = os.path.splitext(filename)[0]
+    if not os.path.exists(f"recorte/{foldername}"):
+        os.makedirs(f"recorte/{foldername}")
+
+    datos = results[0].masks.xy
 
+    num_notas = 0
 
+    for i, coords in enumerate(datos):
+        # segmentation coordinates for this detection
+        coordenadas = np.array(coords)
 
+        # create an empty mask
+        mask = np.zeros((image.shape[0], image.shape[1]), dtype=np.uint8)
 
+        # draw the segmentation polygon on the mask
+        cv2.fillPoly(mask, [coordenadas.astype(np.int32)], 255)
+
+        # apply the mask to the image
+        masked = cv2.bitwise_and(image, image, mask=mask)
+
+        # crop the image using the mask's bounding box
+        x, y, w, h = cv2.boundingRect(mask)
+        recorte = masked[y:y+h, x:x+w]
+
+        # save the crop as an image
+        recorte_path = os.path.join(f"recorte/{foldername}", f"nota {i}.jpg")
+        cv2.imwrite(recorte_path, recorte)
+
+        num_notas += 1
+
+    noticias(foldername)
+    segmentacion = f"diarios/predict/{filename}.png"
+    segmentacion = cv2.imread(segmentacion)
+    recorte1 = f"recorte/{foldername}/nota 0.jpg"
+    prediccion = f"deteccion/{foldername}/predict/nota 0.jpg"
+
+    try:
+        recorte2 = f"recorte/{foldername}/nota 1.jpg"
+        prediccion2 = f"deteccion/{foldername}/predict/nota 1.jpg"
+        if not os.path.exists(recorte2):
+            raise Exception(f"El archivo {recorte2} no existe.")
+    except:
+        recorte2 = "sin_nota.jpg"
+        prediccion2 = "sin_nota.jpg"
+
+    return segmentacion, prediccion, prediccion2, num_notas
 
+def noticias(carpeta):
+    nombre = carpeta
+    model = YOLO("best-detect.pt")
+    results = model.predict(source=f"recorte/{nombre}", save=True, save_txt=True, project=f"deteccion/{nombre}", conf=0.75)  # save plotted images
+    return print(f"Imagen {nombre} procesada correctamente")
 
 with gr.Blocks() as demo:
+    gr.Markdown(
+        """
+        <h1 align="center"> IA por la Identidad
+        </h1>
+        <h2 align="center"> Datathon
+        </h2>
+        """
+    )
     gr.Markdown(
         """
         <p align="center">
+        <img width = 600 src="https://raw.githubusercontent.com/BonfantiMatias/images/main/banner%20fundaciones.jpeg">
+
         </p>
+
         """
     )
+
+
     gr.Markdown(
         """
+        - Puede Seleccionar una de las imagenes de ejemplo o subir una desde su pc
+        - Para borrar la imagen que esta en la ventana de procesamiento debe presionar la "X" que se encuentra en el vertice superior derecho
         """
     )
+    with gr.Row():
+        seed = gr.components.Image(type="filepath", label="Input")
+    with gr.Row():
+        with gr.Column():
+            gr.Examples(["Ambito2.jpg"], inputs=[seed])
+            gr.Examples(["Clarin2.jpg"], inputs=[seed])
+
+        with gr.Column():
+            gr.Examples(["Popular.jpg"], inputs=[seed])
+            gr.Examples(["Clarin3.jpg"], inputs=[seed])
 
     with gr.Row():
+        notas = gr.Label(label="Numero de Notas")
+
+    with gr.Row():
+
         with gr.Column():
+            segmentacion = gr.Image(label="Segmentacion Notas")
         with gr.Column():
+            prediccion = gr.Image(label="Prediccion Primera Nota")
+        with gr.Column():
+            prediccion2 = gr.Image(label="Prediccion Segunda Nota")
 
     with gr.Row():
+        btn = gr.Button("Procesar Imagen")
+        btn.click(recortar_notas, inputs=[seed], outputs=[segmentacion, prediccion, prediccion2, notas])
+
 if __name__ == "__main__":
     demo.launch()
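Outside the Gradio UI, the same pipeline can be driven by calling recortar_notas directly on one of the bundled example images. A minimal sketch only, assuming the Space's working directory contains the weights seg-640.pt and best-detect.pt, the fallback image sin_nota.jpg, and an example page such as Clarin2.jpg; importing app builds the Blocks UI but does not launch it, since demo.launch() is guarded by the __main__ check:

# Minimal driver for the new pipeline, run from the Space's root directory.
# Assumes seg-640.pt, best-detect.pt, sin_nota.jpg and Clarin2.jpg are present.
import cv2

from app import recortar_notas

segmentacion, prediccion, prediccion2, num_notas = recortar_notas("Clarin2.jpg")

print(f"Notes detected: {num_notas}")
print(f"Annotated first note: {prediccion}")

# The first return value is the segmented page loaded as a NumPy array (BGR);
# save it for inspection if the annotated page was found on disk.
if segmentacion is not None:
    cv2.imwrite("segmentacion_Clarin2.jpg", segmentacion)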