Teddy-Project committed on
Commit
88f72c8
verified
1 Parent(s): e9bc6f7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +54 -36
app.py CHANGED
@@ -1,36 +1,54 @@
1
- import gradio as gr import torch from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline from diffusers import StableDiffusionPipeline from PIL import Image import io import os
2
-
3
- Carga del modelo de texto
4
-
5
- text_tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-7b-instruct") text_model = AutoModelForCausalLM.from_pretrained("tiiuae/falcon-7b-instruct", device_map="auto", torch_dtype=torch.bfloat16) text_pipe = pipeline("text-generation", model=text_model, tokenizer=text_tokenizer)
6
-
7
- Carga del modelo de im谩genes
8
-
9
- image_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16).to("cuda")
10
-
11
- Memoria por usuario
12
-
13
- user_memory = {}
14
-
15
- def is_prompt_image(text): image_keywords = ["dibuja", "imagen", "pinta", "crea una imagen", "genera una foto", "haz un dibujo", "quiero ver"] return any(kw in text.lower() for kw in image_keywords)
16
-
17
- def build_prompt(user_id, message): personality = ( "Eres una asistente virtual sensual, dulce y coqueta. " "Respondes de forma cercana, creativa y provocadora. Usa emoticonos como 馃槝馃槈馃敟.\n\n" ) if user_id not in user_memory: user_memory[user_id] = []
18
-
19
- user_memory[user_id].append(f"Usuario: {message}")
20
- user_memory[user_id] = user_memory[user_id][-5:]
21
-
22
- context = "\n".join(user_memory[user_id])
23
- return personality + context + "\nAsistente:"
24
-
25
- def responder(input_text, user_id="usuario1"): if is_prompt_image(input_text): image = image_pipe(input_text).images[0] return None, image else: prompt = build_prompt(user_id, input_text) result = text_pipe(prompt, max_new_tokens=200, do_sample=True, temperature=0.8, top_p=0.95)[0]['generated_text'] reply = result.split("Asistente:")[-1].strip() user_memory[user_id].append(f"Asistente: {reply}") return reply, None
26
-
27
- with gr.Blocks() as demo: with gr.Row(): chatbot = gr.Textbox(label="Escribe algo...") btn = gr.Button("Enviar") with gr.Row(): output_text = gr.Textbox(label="Respuesta de texto") output_image = gr.Image(label="Imagen generada")
28
-
29
- def on_click(user_input):
30
- text, image = responder(user_input)
31
- return text, image
32
-
33
- btn.click(fn=on_click, inputs=[chatbot], outputs=[output_text, output_image])
34
-
35
- demo.launch()
36
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import torch
3
+ from transformers import AutoTokenizer, AutoModelForCausalLM, TextGenerationPipeline
4
+ from diffusers import StableDiffusionPipeline
5
+ from PIL import Image
6
+
7
# --- Text model -------------------------------------------------------
# TinyLlama chat model in fp16; accelerate's device_map places it on the
# best available device automatically.
text_model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"

tokenizer = AutoTokenizer.from_pretrained(text_model_name)

_load_kwargs = dict(torch_dtype=torch.float16, device_map="auto")
text_model = AutoModelForCausalLM.from_pretrained(text_model_name, **_load_kwargs)

# Sampling defaults are baked into the pipeline so every call shares them.
_gen_kwargs = dict(max_new_tokens=200, do_sample=True, temperature=0.8, top_p=0.95)
text_pipe = TextGenerationPipeline(model=text_model, tokenizer=tokenizer, **_gen_kwargs)
23
+
24
# --- Image model ------------------------------------------------------
# Stable Diffusion v1.5 in fp16. The original hard-coded .to("cuda"),
# which raises at import time on CPU-only hosts; fall back to CPU so the
# app can still start. (fp16 on CPU is slow and some ops may not support
# it — NOTE(review): consider float32 on CPU if that becomes an issue.)
_image_device = "cuda" if torch.cuda.is_available() else "cpu"
image_pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch.float16,
).to(_image_device)
29
+
30
# Routing heuristic: decide whether the user's message asks for an image.
def is_image_prompt(prompt):
    """Return True if *prompt* looks like an image-generation request.

    Matches a short list of Spanish trigger phrases, case-insensitively.
    Two keywords were previously garbled by a bad encoding round-trip
    (mojibake) and could never match real user input; they are restored
    to proper UTF-8 here.
    """
    keywords = [
        "dibuja",
        "genera una imagen",
        "imagen de",
        "píntame",
        "crea una ilustración",
    ]
    text = prompt.lower()
    return any(kw in text for kw in keywords)
34
+
35
# Main bot entry point: routes a user message to image or text generation.
def bot_response(message):
    """Answer *message* with a ``(text, image)`` tuple.

    Exactly one side is populated: the raw message is fed to Stable
    Diffusion when it looks like an image request, otherwise a persona
    prompt is built and completed by the text pipeline.
    """
    if is_image_prompt(message):
        # Image request: Stable Diffusion gets the raw user message.
        image = image_pipe(message).images[0]
        return "", image
    else:
        prompt = "Eres una asistente coqueta, creativa y dulce.\nUsuario: " + message + "\nAsistente:"
        result = text_pipe(prompt)[0]['generated_text']
        # The pipeline echoes the prompt in generated_text. Strip that
        # prefix directly instead of split("Asistente:")[-1], which would
        # truncate any reply that itself contains the marker. Keep the
        # split as a fallback in case the echo is ever normalized.
        if result.startswith(prompt):
            reply = result[len(prompt):].strip()
        else:
            reply = result.split("Asistente:")[-1].strip()
        return reply, None
45
+
46
# --- Interface --------------------------------------------------------
# Single textbox in, (text, image) out; fires on Enter (Textbox.submit).
with gr.Blocks() as demo:
    # Header text was garbled by a bad encoding round-trip; restored to
    # proper UTF-8 ("imágenes").
    gr.Markdown("## Asistente inteligente de texto e imágenes")
    input_box = gr.Textbox(label="Tu mensaje", placeholder="Escribe lo que quieras...")
    text_output = gr.Textbox(label="Respuesta de texto")
    image_output = gr.Image(label="Imagen generada")
    input_box.submit(fn=bot_response, inputs=input_box, outputs=[text_output, image_output])

demo.launch()