Teddy-Project committed
Commit 6c5ea10 (verified)
Parent: 0287d57

Update app.py

Files changed (1):
  app.py  +11 -38
app.py CHANGED
@@ -1,43 +1,16 @@
  import gradio as gr
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
  import torch
- from transformers import AutoTokenizer, AutoModelForCausalLM, TextGenerationPipeline
- from diffusers import StableDiffusionPipeline
- from PIL import Image
- import io

- # Text model on CPU
- text_model = "tiiuae/falcon-rw-1b"
- tokenizer = AutoTokenizer.from_pretrained(text_model)
- model = AutoModelForCausalLM.from_pretrained(text_model)
- text_pipeline = TextGenerationPipeline(model=model, tokenizer=tokenizer, device=-1)
+ model_id = "mistralai/Mistral-7B-Instruct-v0.1"  # Requires access (gated model)
+ tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=True)
+ model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.float16, use_auth_token=True)
+ pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

- # Image model on CPU
- image_pipe = StableDiffusionPipeline.from_pretrained(
-     "runwayml/stable-diffusion-v1-5",
-     torch_dtype=torch.float32
- ).to("cpu")

- # Logic to detect whether the prompt asks for text or an image
- def chatbot(input_text):
-     if any(word in input_text.lower() for word in ["imagen", "dibuja", "pinta", "foto", "muestra"]):
-         image = image_pipe(input_text).images[0]
-         return None, image
-     else:
-         response = text_pipeline(input_text, max_new_tokens=150, do_sample=True)[0]['generated_text']
-         return response, None
+ def chat(user_input):
+     prompt = f"""<s>[INST] {user_input.strip()} [/INST]"""
+     output = pipe(prompt, max_new_tokens=200, temperature=0.7, do_sample=True)[0]["generated_text"]
+     response = output.split("[/INST]")[-1].strip()
+     return response

- # Gradio interface
- with gr.Blocks() as demo:
-     gr.Markdown("## Bot Generador de Texto e Imágenes (CPU)")
-
-     with gr.Row():
-         textbox = gr.Textbox(placeholder="Escribe algo... (ej: Dibuja una chica en la playa)")
-         send = gr.Button("Enviar")
-
-     with gr.Row():
-         text_output = gr.Textbox(label="Respuesta de texto")
-         image_output = gr.Image(label="Imagen generada")
-
-     send.click(fn=chatbot, inputs=textbox, outputs=[text_output, image_output])
-
- demo.launch()
+ gr.Interface(fn=chat, inputs="text", outputs="text", title="MyBot - Texto").launch()
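
Note on the updated code: in recent transformers releases, use_auth_token is deprecated in favor of token, and the manual output.split("[/INST]") step can be dropped by passing return_full_text=False to the pipeline, which then returns only the newly generated completion. A minimal sketch of the same chat flow with those two adjustments, assuming a transformers version that supports both parameters:

import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

model_id = "mistralai/Mistral-7B-Instruct-v0.1"  # gated: accept the license on the Hub first

# token=True reads the credentials stored by `huggingface-cli login`
# and replaces the deprecated use_auth_token=True.
tokenizer = AutoTokenizer.from_pretrained(model_id, token=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id, device_map="auto", torch_dtype=torch.float16, token=True
)
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

def chat(user_input):
    prompt = f"<s>[INST] {user_input.strip()} [/INST]"
    # return_full_text=False returns only the completion, so no
    # manual split on "[/INST]" is needed afterwards.
    result = pipe(
        prompt,
        max_new_tokens=200,
        temperature=0.7,
        do_sample=True,
        return_full_text=False,
    )
    return result[0]["generated_text"].strip()

gr.Interface(fn=chat, inputs="text", outputs="text", title="MyBot - Texto").launch()

Recent tokenizer versions also ship Mistral's chat template, so tokenizer.apply_chat_template([{"role": "user", "content": user_input}], tokenize=False, add_generation_prompt=True) can build the [INST] prompt instead of hand-formatting it. Either way, the gated model requires accepting its license on the Hub and authenticating first.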