Spaces: Running on Zero
Files changed:

- README.md +1 -1
- app_omega(legacy).py +264 -0
- app_zero.py +329 -0
- prompts.py +123 -0
- requirements.txt +3 -1
- utils.py +54 -0
README.md CHANGED
@@ -8,7 +8,7 @@ pinned: false
 license: cc-by-nc-sa-4.0
 suggested_hardware: t4-small
 suggested_storage: small
-app_file:
+app_file: app_zero.py
 fullWidth: true
 models:
 - Iker/ClickbaitFighter-10B
app_omega(legacy).py ADDED
@@ -0,0 +1,264 @@
+import os
+import requests
+import gradio as gr
+from download_url import download_text_and_title
+from cache_system import CacheHandler
+from collections import OrderedDict
+from typing import Any, Iterable, List
+import datetime
+import json
+
+server = os.environ.get("SERVER") or "http://localhost:7861/generate"
+auth_token = os.environ.get("TOKEN") or True
+API_KEY = os.environ.get("API_KEY") or None
+
+
+total_runs = 0
+
+
+def call_vllm_server(title, body, mode, stream=True):
+    api_url = server
+    headers = {"User-Agent": "Test Client"}
+
+    payload = {  # renamed from "json" to avoid shadowing the json module
+        "n": 1,
+        "tittle": title,  # the legacy server expects this misspelled key
+        "body": body,
+        "mode": mode,
+        "max_tokens": 4096,
+        "temperature": 0.15,
+        "top_p": 0.1,
+        "top_k": 40,
+        "repetition_penalty": 1.1,
+        "stop": [
+            "<s>",
+            "</s>",
+            "\\n",
+            "<|im_end|>",
+        ],
+        "stream": stream,
+        "api_key": API_KEY,
+    }
+    response = requests.post(api_url, headers=headers, json=payload)
+    return response
+
+
+def get_streaming_response(response: requests.Response) -> Iterable[List[str]]:
+    for chunk in response.iter_lines(
+        chunk_size=8192, decode_unicode=False, delimiter=b"\0"
+    ):
+        if chunk:
+            data = json.loads(chunk.decode("utf-8"))
+            output = data["text"]
+            yield output
+
+
+class HuggingFaceDatasetSaver_custom(gr.HuggingFaceDatasetSaver):
+    def _deserialize_components(
+        self,
+        data_dir,
+        flag_data: list[Any],
+        flag_option: str = "",
+        username: str = "",
+    ) -> tuple[dict[Any, Any], list[Any]]:
+        """Deserialize components and return the corresponding row for the flagged sample.
+
+        Images/audio are saved to disk as individual files.
+        """
+        # Components that can have a preview on dataset repos
+        file_preview_types = {gr.Audio: "Audio", gr.Image: "Image"}
+
+        # Generate the row corresponding to the flagged sample
+        features = OrderedDict()
+        row = []
+        for component, sample in zip(self.components, flag_data):
+            label = component.label or ""
+            features[label] = {"dtype": "string", "_type": "Value"}
+            row.append(sample)
+
+        features["flag"] = {"dtype": "string", "_type": "Value"}
+        features["username"] = {"dtype": "string", "_type": "Value"}
+        row.append(flag_option)
+        row.append(username)
+        return features, row
+
+
+def finish_generation(text: str) -> str:
+    return f"{text}\n\n⬇️ Ayuda a mejorar la herramienta marcando si el resumen es correcto o no. ⬇️"
+
+
+def generate_text(
+    url: str, mode: int, progress=gr.Progress(track_tqdm=False)
+) -> Iterable[tuple]:
+    global cache_handler
+    global total_runs
+
+    total_runs += 1
+    print(f"Total runs: {total_runs}. Last run: {datetime.datetime.now()}")
+
+    url = url.strip()
+
+    if url.startswith("https://twitter.com/") or url.startswith("https://x.com/"):
+        yield (
+            "🤖 Vaya, parece que has introducido la URL de un tweet. No puedo acceder a tweets, tienes que introducir la URL de una noticia.",
+            "❌❌❌ Si el tweet contiene una noticia, dame la URL de la noticia ❌❌❌",
+            "Error",
+        )
+        return (
+            "🤖 Vaya, parece que has introducido la URL de un tweet. No puedo acceder a tweets, tienes que introducir la URL de una noticia.",
+            "❌❌❌ Si el tweet contiene una noticia, dame la URL de la noticia ❌❌❌",
+            "Error",
+        )
+
+    # 1) Download the article
+
+    progress(0, desc="🤖 Accediendo a la noticia")
+
+    # First, check if the URL is in the cache
+    title, text, temp = cache_handler.get_from_cache(url, mode)
+    if title is not None and text is not None and temp is not None:
+        temp = finish_generation(temp)
+        yield title, temp, text
+        return title, temp, text
+    else:
+        try:
+            title, text, url = download_text_and_title(url)
+        except Exception as e:
+            print(e)
+            title = None
+            text = None
+
+        if title is None or text is None:
+            yield (
+                "🤖 No he podido acceder a la noticia, asegúrate de que la URL es correcta y de que es posible acceder a la noticia desde un navegador.",
+                "❌❌❌ Inténtalo de nuevo ❌❌❌",
+                "Error",
+            )
+            return (
+                "🤖 No he podido acceder a la noticia, asegúrate de que la URL es correcta y de que es posible acceder a la noticia desde un navegador.",
+                "❌❌❌ Inténtalo de nuevo ❌❌❌",
+                "Error",
+            )
+
+        # Test if the redirected and clean url is in the cache
+        _, _, temp = cache_handler.get_from_cache(url, mode, second_try=True)
+        if temp is not None:
+            temp = finish_generation(temp)
+            yield title, temp, text
+            return title, temp, text
+
+        progress(0.5, desc="🤖 Leyendo noticia")
+
+        try:
+            response = call_vllm_server(title, text, mode, stream=True)
+            for h in get_streaming_response(response):
+                temp = h[0]
+                yield title, temp, text
+
+        except Exception as e:
+            print(e)
+            yield (
+                "🤖 El servidor no se encuentra disponible.",
+                "❌❌❌ Inténtalo de nuevo más tarde ❌❌❌",
+                "Error",
+            )
+            return (
+                "🤖 El servidor no se encuentra disponible.",
+                "❌❌❌ Inténtalo de nuevo más tarde ❌❌❌",
+                "Error",
+            )
+
+        cache_handler.add_to_cache(
+            url=url, title=title, text=text, summary_type=mode, summary=temp
+        )
+        temp = finish_generation(temp)
+        yield title, temp, text
+
+    hits, misses, cache_len = cache_handler.get_cache_stats()
+    print(
+        f"Hits: {hits}, misses: {misses}, cache length: {cache_len}. Percent hits: {round(hits/(hits+misses)*100,2)}%."
+    )
+    return title, temp, text
+
+
+cache_handler = CacheHandler(max_cache_size=1000)
+hf_writer = HuggingFaceDatasetSaver_custom(
+    auth_token, "Iker/Clickbait-News", private=True, separate_dirs=False
+)
+
+
+demo = gr.Interface(
+    generate_text,
+    inputs=[
+        gr.Textbox(
+            label="🌐 URL de la noticia",
+            info="Introduce la URL de la noticia que deseas resumir.",
+            value="https://ikergarcia1996.github.io/Iker-Garcia-Ferrero/",
+            interactive=True,
+        ),
+        gr.Slider(
+            minimum=0,
+            maximum=100,
+            step=50,
+            value=50,
+            label="🎚️ Nivel de resumen",
+            info="""¿Hasta qué punto quieres resumir la noticia?
+
+Si solo deseas un resumen, selecciona 0.
+
+Si buscas un resumen y desmontar el clickbait, elige 50.
+
+Para obtener solo la respuesta al clickbait, selecciona 100""",
+            interactive=True,
+        ),
+    ],
+    outputs=[
+        gr.Textbox(
+            label="📰 Titular de la noticia",
+            interactive=False,
+            placeholder="Aquí aparecerá el título de la noticia",
+        ),
+        gr.Textbox(
+            label="🗒️ Resumen",
+            interactive=False,
+            placeholder="Aquí aparecerá el resumen de la noticia.",
+        ),
+        gr.Textbox(
+            label="Noticia completa",
+            visible=False,
+            render=False,
+            interactive=False,
+            placeholder="Aquí aparecerá la noticia completa.",
+        ),
+    ],
+    # title="⚔️ Clickbait Fighter! ⚔️",
+    thumbnail="https://huggingface.co/spaces/Iker/ClickbaitFighter/resolve/main/logo2.png",
+    theme="JohnSmith9982/small_and_pretty",
+    description="""
+<table>
+<tr>
+<td style="width:100%"><img src="https://huggingface.co/spaces/Iker/ClickbaitFighter/resolve/main/head.png" align="right" width="100%"> </td>
+</tr>
+</table>
+
+<p align="center"> <a href="https://www.omegaai.io/"> <img src="https://huggingface.co/spaces/Iker/ClickbaitFighter/resolve/main/omegaai.png" align="center" width="15%"> </a> <a href="https://0dai.omegaai.io/"> <img src="https://huggingface.co/spaces/Iker/ClickbaitFighter/resolve/main/0dai.png" align="center" width="15%"> </a></p>
+
+<p align="justify">Esta Inteligencia Artificial es capaz de generar un resumen de una sola frase que revela la verdad detrás de un titular sensacionalista o clickbait. Solo tienes que introducir la URL de la noticia. La IA accederá a la noticia, la leerá y en cuestión de segundos generará un resumen de una sola frase que revele la verdad detrás del titular.</p>
+
+🎚 Ajusta el nivel de resumen con el control deslizante. Cuanto más alto, más corto será el resumen.
+
+⌚ La IA se encuentra corriendo en un hardware bastante modesto, debería tardar menos de 30 segundos en generar el resumen, pero si muchos usuarios usan la app a la vez, tendrás que esperar tu turno.
+
+💸 Este es un proyecto sin ánimo de lucro, no se genera ningún tipo de ingreso con esta app. Los datos, la IA y el código se publicarán para su uso en la investigación académica. No puedes usar esta app para ningún uso comercial.
+
+🧪 El modelo se encuentra en fase de desarrollo, si quieres ayudar a mejorarlo puedes usar los botones 👍 y 👎 para valorar el resumen. ¡Gracias por tu ayuda!""",
+    article="Esta Inteligencia Artificial ha sido generada por Iker García-Ferrero. Puedes saber más sobre mi trabajo en mi [página web](https://ikergarcia1996.github.io/Iker-Garcia-Ferrero/) o mi perfil de [X](https://twitter.com/iker_garciaf). Puedes ponerte en contacto conmigo a través de correo electrónico (ver web) y X.",
+    cache_examples=False,
+    allow_flagging="manual",
+    flagging_options=[("👍", "correct"), ("👎", "incorrect")],
+    flagging_callback=hf_writer,
+    concurrency_limit=20,
+)
+
+demo.queue(max_size=None)
+demo.launch(share=False)
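For context on the legacy file above: it talks to an external vLLM-style `/generate` endpoint that streams NUL-delimited JSON chunks, each carrying the full text generated so far. Here is a minimal standalone client sketch, assuming the same endpoint, payload keys, and response shape that `call_vllm_server` and `get_streaming_response` rely on; none of this is a documented API.

```python
# Minimal sketch of a client for the legacy streaming endpoint.
# The URL, the payload keys ("tittle", "body", "mode") and the response shape
# (data["text"] is a list of candidate strings) are assumptions read off
# call_vllm_server() and get_streaming_response() above.
import json

import requests

SERVER = "http://localhost:7861/generate"  # same default as the app

payload = {
    "n": 1,
    "tittle": "Titular sensacionalista",  # the server expects this misspelled key
    "body": "Cuerpo completo de la noticia...",
    "mode": 50,
    "max_tokens": 4096,
    "temperature": 0.15,
    "stream": True,
}

with requests.post(SERVER, json=payload, stream=True) as response:
    # Chunks are separated by NUL bytes; each one is a JSON object whose
    # "text" field holds the full generation so far, so we overwrite the line.
    for chunk in response.iter_lines(chunk_size=8192, delimiter=b"\0"):
        if chunk:
            data = json.loads(chunk.decode("utf-8"))
            print(data["text"][0], end="\r", flush=True)
print()
```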
app_zero.py ADDED
@@ -0,0 +1,329 @@
+import datetime
+import os
+from collections import OrderedDict
+from typing import Any, Iterable
+
+import gradio as gr
+import spaces
+import torch
+from transformers import (
+    AutoModelForCausalLM,
+    AutoTokenizer,
+    GenerationConfig,
+    LogitsProcessorList,
+    TextStreamer,
+)
+
+from cache_system import CacheHandler
+from download_url import download_text_and_title
+from prompts import (
+    summarize_clickbait_large_prompt,
+    summarize_clickbait_short_prompt,
+    summarize_prompt,
+)
+from utils import StopAfterTokenIsGenerated
+
+auth_token = os.environ.get("TOKEN") or True
+
+total_runs = 0
+
+tokenizer = AutoTokenizer.from_pretrained("Iker/ClickbaitFighter-10B-pro")
+model = AutoModelForCausalLM.from_pretrained(
+    "Iker/ClickbaitFighter-10B-pro",
+    torch_dtype=torch.bfloat16,
+    device_map="auto",
+    # quantization_config=BitsAndBytesConfig(
+    #     load_in_4bit=True,
+    #     bnb_4bit_compute_dtype=torch.bfloat16,
+    #     bnb_4bit_use_double_quant=True,
+    # ),
+    attn_implementation="flash_attention_2",
+)
+
+generation_config = GenerationConfig(
+    max_new_tokens=256,  # Summaries are short; we do not need more tokens
+    min_new_tokens=1,  # We do not want empty summaries
+    do_sample=True,  # A little better than greedy sampling
+    num_beams=1,
+    use_cache=True,  # Efficiency
+    top_k=40,
+    top_p=0.1,
+    repetition_penalty=1.1,  # Helps keep the model from looping
+    encoder_repetition_penalty=1.1,  # Encourages the model to quote the original text
+    temperature=0.15,  # Low temperature so the model does not get too creative
+    eos_token_id=tokenizer.eos_token_id,
+    pad_token_id=tokenizer.eos_token_id,
+)
+
+stop_words = [
+    "<s>",
+    "</s>",
+    "\\n",
+    "[/INST]",
+    "[INST]",
+    "### User:",
+    "### Assistant:",
+    "###",
+    "<start_of_turn>",
+    "<end_of_turn>",
+    "<end_of_turn>\\n",
+    "<eos>",
+    "<|im_end|>",
+]
+
+
+stop_criteria = LogitsProcessorList(
+    [
+        StopAfterTokenIsGenerated(
+            stops=[
+                torch.tensor(tokenizer.encode(stop_word, add_special_tokens=False))
+                for stop_word in stop_words.copy()
+            ],
+            eos_token_id=tokenizer.eos_token_id,
+        )
+    ]
+)
+
+
+class HuggingFaceDatasetSaver_custom(gr.HuggingFaceDatasetSaver):
+    def _deserialize_components(
+        self,
+        data_dir,
+        flag_data: list[Any],
+        flag_option: str = "",
+        username: str = "",
+    ) -> tuple[dict[Any, Any], list[Any]]:
+        """Deserialize components and return the corresponding row for the flagged sample.
+
+        Images/audio are saved to disk as individual files.
+        """
+
+        # Generate the row corresponding to the flagged sample
+        features = OrderedDict()
+        row = []
+        for component, sample in zip(self.components, flag_data):
+            label = component.label or ""
+            features[label] = {"dtype": "string", "_type": "Value"}
+            row.append(sample)
+
+        features["flag"] = {"dtype": "string", "_type": "Value"}
+        features["username"] = {"dtype": "string", "_type": "Value"}
+        row.append(flag_option)
+        row.append(username)
+        return features, row
+
+
+def finish_generation(text: str) -> str:
+    return f"{text}\n\n⬇️ Ayuda a mejorar la herramienta marcando si el resumen es correcto o no. ⬇️"
+
+
+@spaces.GPU
+def generate_text(
+    url: str, mode: int, progress=gr.Progress(track_tqdm=False)
+) -> Iterable[tuple]:
+    global cache_handler
+    global total_runs
+
+    total_runs += 1
+    print(f"Total runs: {total_runs}. Last run: {datetime.datetime.now()}")
+
+    url = url.strip()
+
+    if url.startswith("https://twitter.com/") or url.startswith("https://x.com/"):
+        yield (
+            "🤖 Vaya, parece que has introducido la URL de un tweet. No puedo acceder a tweets, tienes que introducir la URL de una noticia.",
+            "❌❌❌ Si el tweet contiene una noticia, dame la URL de la noticia ❌❌❌",
+            "Error",
+        )
+        return (
+            "🤖 Vaya, parece que has introducido la URL de un tweet. No puedo acceder a tweets, tienes que introducir la URL de una noticia.",
+            "❌❌❌ Si el tweet contiene una noticia, dame la URL de la noticia ❌❌❌",
+            "Error",
+        )
+
+    # 1) Download the article
+
+    progress(0, desc="🤖 Accediendo a la noticia")
+
+    # First, check if the URL is in the cache
+    title, text, temp = cache_handler.get_from_cache(url, mode)
+    if title is not None and text is not None and temp is not None:
+        temp = finish_generation(temp)
+        yield title, temp, text
+        return title, temp, text
+    else:
+        try:
+            title, text, url = download_text_and_title(url)
+        except Exception as e:
+            print(e)
+            title = None
+            text = None
+
+        if title is None or text is None:
+            yield (
+                "🤖 No he podido acceder a la noticia, asegúrate de que la URL es correcta y de que es posible acceder a la noticia desde un navegador.",
+                "❌❌❌ Inténtalo de nuevo ❌❌❌",
+                "Error",
+            )
+            return (
+                "🤖 No he podido acceder a la noticia, asegúrate de que la URL es correcta y de que es posible acceder a la noticia desde un navegador.",
+                "❌❌❌ Inténtalo de nuevo ❌❌❌",
+                "Error",
+            )
+
+        # Test if the redirected and clean url is in the cache
+        _, _, temp = cache_handler.get_from_cache(url, mode, second_try=True)
+        if temp is not None:
+            temp = finish_generation(temp)
+            yield title, temp, text
+            return title, temp, text
+
+        progress(0.5, desc="🤖 Leyendo noticia")
+
+        try:
+            if mode == 0:
+                prompt = summarize_prompt(title, text)
+            elif mode == 50:
+                prompt = summarize_clickbait_short_prompt(title, text)
+            elif mode == 100:
+                prompt = summarize_clickbait_large_prompt(title, text)
+            else:
+                raise ValueError("Mode not supported")
+
+            formatted_prompt = tokenizer.apply_chat_template(
+                [{"role": "user", "content": prompt}],
+                tokenize=False,
+                add_generation_prompt=True,
+            )
+
+            model_inputs = tokenizer(
+                [formatted_prompt], return_tensors="pt", add_special_tokens=False
+            )
+
+            streamer = TextStreamer(  # prints tokens to the Space logs as they arrive
+                tokenizer=tokenizer, skip_prompt=True, skip_special_tokens=True
+            )
+
+            model_output = model.generate(
+                **model_inputs.to(model.device),
+                streamer=streamer,
+                generation_config=generation_config,
+                logits_processor=stop_criteria,
+            )
+
+            yield title, streamer, text  # interim yield; the decoded text follows
+
+            temp = tokenizer.batch_decode(
+                model_output[:, model_inputs["input_ids"].shape[-1] :],
+                skip_special_tokens=True,
+                clean_up_tokenization_spaces=True,
+            )[0]
+
+            yield title, temp, text
+
+        except Exception as e:
+            print(e)
+            yield (
+                "🤖 El servidor no se encuentra disponible.",
+                "❌❌❌ Inténtalo de nuevo más tarde ❌❌❌",
+                "Error",
+            )
+            return (
+                "🤖 El servidor no se encuentra disponible.",
+                "❌❌❌ Inténtalo de nuevo más tarde ❌❌❌",
+                "Error",
+            )
+
+        cache_handler.add_to_cache(
+            url=url, title=title, text=text, summary_type=mode, summary=temp
+        )
+        temp = finish_generation(temp)
+        yield title, temp, text
+
+    hits, misses, cache_len = cache_handler.get_cache_stats()
+    print(
+        f"Hits: {hits}, misses: {misses}, cache length: {cache_len}. Percent hits: {round(hits/(hits+misses)*100,2)}%."
+    )
+    return title, temp, text
+
+
+cache_handler = CacheHandler(max_cache_size=1000)
+hf_writer = HuggingFaceDatasetSaver_custom(
+    auth_token, "Iker/Clickbait-News", private=True, separate_dirs=False
+)
+
+
+demo = gr.Interface(
+    generate_text,
+    inputs=[
+        gr.Textbox(
+            label="🌐 URL de la noticia",
+            info="Introduce la URL de la noticia que deseas resumir.",
+            value="https://ikergarcia1996.github.io/Iker-Garcia-Ferrero/",
+            interactive=True,
+        ),
+        gr.Slider(
+            minimum=0,
+            maximum=100,
+            step=50,
+            value=50,
+            label="🎚️ Nivel de resumen",
+            info="""¿Hasta qué punto quieres resumir la noticia?
+
+Si solo deseas un resumen, selecciona 0.
+
+Si buscas un resumen y desmontar el clickbait, elige 50.
+
+Para obtener solo la respuesta al clickbait, selecciona 100""",
+            interactive=True,
+        ),
+    ],
+    outputs=[
+        gr.Textbox(
+            label="📰 Titular de la noticia",
+            interactive=False,
+            placeholder="Aquí aparecerá el título de la noticia",
+        ),
+        gr.Textbox(
+            label="🗒️ Resumen",
+            interactive=False,
+            placeholder="Aquí aparecerá el resumen de la noticia.",
+        ),
+        gr.Textbox(
+            label="Noticia completa",
+            visible=False,
+            render=False,
+            interactive=False,
+            placeholder="Aquí aparecerá la noticia completa.",
+        ),
+    ],
+    # title="⚔️ Clickbait Fighter! ⚔️",
+    thumbnail="https://huggingface.co/spaces/Iker/ClickbaitFighter/resolve/main/logo2.png",
+    theme="JohnSmith9982/small_and_pretty",
+    description="""
+<table>
+<tr>
+<td style="width:100%"><img src="https://huggingface.co/spaces/Iker/ClickbaitFighter/resolve/main/head.png" align="right" width="100%"> </td>
+</tr>
+</table>
+
+<p align="justify">Esta Inteligencia Artificial es capaz de generar un resumen de una sola frase que revela la verdad detrás de un titular sensacionalista o clickbait. Solo tienes que introducir la URL de la noticia. La IA accederá a la noticia, la leerá y en cuestión de segundos generará un resumen de una sola frase que revele la verdad detrás del titular.</p>
+
+🎚 Ajusta el nivel de resumen con el control deslizante. Cuanto más alto, más corto será el resumen.
+
+⌚ La IA se encuentra corriendo en un hardware bastante modesto, debería tardar menos de 30 segundos en generar el resumen, pero si muchos usuarios usan la app a la vez, tendrás que esperar tu turno.
+
+💸 Este es un proyecto sin ánimo de lucro, no se genera ningún tipo de ingreso con esta app. Los datos, la IA y el código se publicarán para su uso en la investigación académica. No puedes usar esta app para ningún uso comercial.
+
+🧪 El modelo se encuentra en fase de desarrollo, si quieres ayudar a mejorarlo puedes usar los botones 👍 y 👎 para valorar el resumen. ¡Gracias por tu ayuda!""",
+    article="Esta Inteligencia Artificial ha sido generada por Iker García-Ferrero. Puedes saber más sobre mi trabajo en mi [página web](https://ikergarcia1996.github.io/Iker-Garcia-Ferrero/) o mi perfil de [X](https://twitter.com/iker_garciaf). Puedes ponerte en contacto conmigo a través de correo electrónico (ver web) y X.",
+    cache_examples=False,
+    allow_flagging="manual",
+    flagging_options=[("👍", "correct"), ("👎", "incorrect")],
+    flagging_callback=hf_writer,
+    concurrency_limit=20,
+)
+
+
+demo.queue(max_size=None)
+demo.launch(share=False)
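One quirk of `generate_text` above: `TextStreamer` only prints tokens to stdout, the streamer object itself is yielded to Gradio once, and the UI only receives real text after `generate()` finishes. A sketch of how token-by-token streaming into the UI could look instead, using `TextIteratorStreamer` and a background thread; this is an alternative, not what the commit does, and it assumes the `tokenizer`, `model`, `generation_config`, and `stop_criteria` objects defined above:

```python
# Sketch: stream partial summaries into the Gradio output instead of stdout.
# Reuses tokenizer/model/generation_config/stop_criteria from app_zero.py.
from threading import Thread

from transformers import TextIteratorStreamer


def generate_streaming(formatted_prompt: str):
    model_inputs = tokenizer(
        [formatted_prompt], return_tensors="pt", add_special_tokens=False
    )
    streamer = TextIteratorStreamer(
        tokenizer, skip_prompt=True, skip_special_tokens=True
    )
    # generate() blocks, so it runs in a thread while we consume the streamer
    thread = Thread(
        target=model.generate,
        kwargs=dict(
            **model_inputs.to(model.device),
            streamer=streamer,
            generation_config=generation_config,
            logits_processor=stop_criteria,
        ),
    )
    thread.start()
    partial = ""
    for new_text in streamer:  # yields decoded text as soon as tokens arrive
        partial += new_text
        yield partial  # each yield would update the Gradio Textbox
    thread.join()
```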
prompts.py ADDED
@@ -0,0 +1,123 @@
+def summarize_clickbait_short_prompt(
+    headline: str,
+    body: str,
+) -> str:
+    """
+    Generate the prompt for the model.
+
+    Args:
+        headline (`str`):
+            The headline of the article.
+        body (`str`):
+            The body of the article.
+    Returns:
+        `str`: The formatted prompt.
+    """
+
+    return (
+        f"Ahora eres una Inteligencia Artificial experta en desmontar titulares sensacionalistas o clickbait. "
+        f"Tu tarea consiste en analizar noticias con titulares sensacionalistas y "
+        f"generar un resumen de una sola frase que revele la verdad detrás del titular.\n"
+        f"Este es el titular de la noticia: {headline}\n"
+        f"El titular plantea una pregunta o proporciona información incompleta. "
+        f"Debes buscar en el cuerpo de la noticia una frase que responda lo que se sugiere en el título. "
+        f"Siempre que puedas cita el texto original, especialmente si se trata de una frase que alguien ha dicho. "
+        f"Si citas una frase que alguien ha dicho, usa comillas para indicar que es una cita. "
+        f"Usa siempre las mínimas palabras posibles. No es necesario que la respuesta sea una oración completa. "
+        f"Puede ser sólo el foco de la pregunta. "
+        f"Recuerda responder siempre en Español.\n"
+        f"Este es el cuerpo de la noticia:\n"
+        f"{body}\n"
+    )
+
+
+def summarize_clickbait_large_prompt(
+    headline: str,
+    body: str,
+) -> str:
+    """
+    Generate the prompt for the model.
+
+    Args:
+        headline (`str`):
+            The headline of the article.
+        body (`str`):
+            The body of the article.
+    Returns:
+        `str`: The formatted prompt.
+    """
+
+    return (
+        f"Ahora eres una Inteligencia Artificial experta en desmontar titulares sensacionalistas o clickbait. "
+        f"Tu tarea consiste en analizar noticias con titulares sensacionalistas y "
+        f"generar un resumen de una sola frase que revele la verdad detrás del titular.\n"
+        f"Este es el titular de la noticia: {headline}\n"
+        f"El titular plantea una pregunta o proporciona información incompleta. "
+        f"Debes buscar en el cuerpo de la noticia una frase que responda lo que se sugiere en el título. "
+        f"Siempre que puedas cita el texto original, especialmente si se trata de una frase que alguien ha dicho. "
+        f"Recuerda responder siempre en Español.\n"
+        f"Este es el cuerpo de la noticia:\n"
+        f"{body}\n"
+    )
+
+
+def summarize_prompt(
+    headline: str,
+    body: str,
+) -> str:
+    """
+    Generate the prompt for the model.
+
+    Args:
+        headline (`str`):
+            The headline of the article.
+        body (`str`):
+            The body of the article.
+    Returns:
+        `str`: The formatted prompt.
+    """
+
+    return (
+        f"Ahora eres una Inteligencia Artificial experta en resumir noticias. "
+        f"Este es el titular de la noticia: {headline}\n"
+        f"Por favor, genera un resumen corto de la noticia. Recuerda responder siempre en Español.\n"
+        f"Este es el cuerpo de la noticia:\n"
+        f"{body}\n"
+    )
+
+
+def clickbait_prompt_flor(
+    headline: str,
+    body: str,
+) -> str:
+    """
+    Specific prompt for FLOR-6.3B-Instructed, which uses a prompt format that is
+    difficult to adapt into a Jinja template.
+
+    Args:
+        headline (`str`):
+            The headline of the article.
+        body (`str`):
+            The body of the article.
+    Returns:
+        `str`: The formatted prompt.
+    """
+
+    return (
+        f"### Instruction\n"
+        f"Ahora eres una Inteligencia Artificial experta en desmontar titulares sensacionalistas o clickbait. "
+        f"Tu tarea consiste en analizar noticias con titulares sensacionalistas y "
+        f"generar un resumen de una sola frase que revele la verdad detrás del titular.\n"
+        f"Este es el titular de la noticia: {headline}\n"
+        f"El titular plantea una pregunta o proporciona información incompleta. "
+        f"Debes buscar en el cuerpo de la noticia una frase que responda lo que se sugiere en el título. "
+        f"Siempre que puedas cita el texto original, especialmente si se trata de una frase que alguien ha dicho. "
+        f"Si citas una frase que alguien ha dicho, usa comillas para indicar que es una cita. "
+        f"Usa siempre las mínimas palabras posibles. No es necesario que la respuesta sea una oración completa. "
+        f"Puede ser sólo el foco de la pregunta. "
+        f"Recuerda responder siempre en Español.\n"
+        f"Este es el cuerpo de la noticia:\n"
+        f"### Context\n"
+        f"{body}\n"
+        f"### Answer\n"
+    )
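A short usage sketch for these helpers, mirroring how app_zero.py feeds them through the tokenizer's chat template; the headline/body strings are made-up examples, not from the repo:

```python
# Usage sketch: turn a headline/body pair into a model-ready prompt.
# Assumes a chat-template-capable tokenizer, as in app_zero.py.
from transformers import AutoTokenizer

from prompts import summarize_clickbait_short_prompt

tokenizer = AutoTokenizer.from_pretrained("Iker/ClickbaitFighter-10B-pro")

prompt = summarize_clickbait_short_prompt(
    headline="No creerás lo que este actor hizo en pleno rodaje",  # made-up example
    body="Durante el rodaje, el actor simplemente se tomó un descanso...",
)

formatted_prompt = tokenizer.apply_chat_template(
    [{"role": "user", "content": prompt}],
    tokenize=False,
    add_generation_prompt=True,
)
print(formatted_prompt)
```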
requirements.txt CHANGED
@@ -3,4 +3,6 @@ setuptools
 gradio
 hf_transfer
 beautifulsoup4
-numpy
+numpy
+transformers
+torch
utils.py ADDED
@@ -0,0 +1,54 @@
+import logging
+from typing import List
+
+import torch
+from transformers import (
+    LogitsProcessor,
+)
+
+
+class StopAfterTokenIsGenerated(LogitsProcessor):
+    def __init__(self, stops: List[torch.Tensor], eos_token_id: int):
+        super().__init__()
+
+        self.stops = stops
+        self.eos_token_id = eos_token_id
+        logging.info(f"Stopping criteria words ids: {self.stops}")
+        self.first_batch = True
+
+    def __call__(
+        self, input_ids: torch.LongTensor, scores: torch.FloatTensor
+    ) -> torch.FloatTensor:
+        """
+        Args:
+            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+                Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
+            scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
+                Prediction scores of a language modeling head. These can be logits for each vocabulary token when not
+                using beam search, or log softmax for each vocabulary token when using beam search.
+
+        Return:
+            `torch.FloatTensor` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
+
+        """
+        if self.first_batch:
+            # Skip the first call, which scores the prompt tokens
+            self.first_batch = False
+            return scores
+
+        for seq_no, seq in enumerate(input_ids):
+            for stop in self.stops:
+                stop = stop.to(device=seq.device, dtype=seq.dtype)
+                if (
+                    len(seq) >= len(stop)
+                    and torch.all((stop == seq[-len(stop) :])).item()
+                ):
+                    # A stop sequence ends this row: force EOS as the next token
+                    scores[seq_no, :] = -float("inf")
+                    scores[seq_no, self.eos_token_id] = 0
+                    logging.info(f"Stopping criteria found: {stop}")
+                    break
+
+        return scores
+
+    def reset(self):
+        self.first_batch = True
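A toy check of the processor's contract, under the semantics of the class above: the first call (which scores the prompt) passes through untouched, and once a stop sequence ends a row, every logit except `eos_token_id` is masked to negative infinity. The tensor shapes and token ids below are made up for illustration:

```python
# Toy demonstration of StopAfterTokenIsGenerated on dummy tensors.
import torch

from utils import StopAfterTokenIsGenerated

processor = StopAfterTokenIsGenerated(
    stops=[torch.tensor([7, 8])],  # pretend token ids of a stop word
    eos_token_id=2,
)

input_ids = torch.tensor([[5, 6, 7, 8]])  # the row ends with the stop sequence
scores = torch.zeros((1, 10))  # dummy logits over a 10-token vocabulary

scores = processor(input_ids, scores)  # first call: prompt scoring, no-op
scores = processor(input_ids, scores)  # second call: stop rule applies

assert scores[0, 2].item() == 0.0  # eos_token_id keeps a finite score
assert torch.isinf(scores[0, 3]).item()  # every other token is masked to -inf
```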