######################
# Imported data
######################
# Model imported from: https://huggingface.co/runwayml/stable-diffusion-v1-5
######################
# Import libraries
######################
import gradio as gr
# ######################
# Global variables
# ######################
import os
USUARIO = os.getenv("USUARIO")
#model_id = "helenai/runwayml-stable-diffusion-v1-5-ov-fp32"
model_id = "runwayml/stable-diffusion-v1-5" #-> original
# ######################
# Helper functions
# ######################
# Disabled leftover: a Streamlit-style progress callback (`st` and `torch` are not imported here, but this block never runs).
if False:
    def pipe_callback(step: int, timestep: int, latents: torch.FloatTensor):
        with st.container():
            st.write(f'Vamos por la iteración {step}')
            st.progress(step*2)  # the progress bar must start at 0 and end at 100
            st.write(f'Quedan {timestep/100:.0f} segundos')
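# A minimal sketch (an assumption, not part of the original app) of the same idea with Gradio
# instead of Streamlit, using diffusers' step callback together with gr.Progress. The helper
# name `make_progress_callback` and the `steps` value are illustrative only.
#
# def make_progress_callback(progress: gr.Progress, steps: int):
#     def _callback(step: int, timestep: int, latents):
#         progress(step / steps, desc=f'Vamos por la iteración {step}')
#     return _callback
#
# Usage: add `progress=gr.Progress()` as an extra parameter of `ia_imagenes` and pass
# `callback=make_progress_callback(progress, steps)` to the pipeline call below.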
# ######################
# Model
# ######################
def ia_imagenes(modelo, prompt, prompt_negativo, uploaded_file, my_strength, my_guidance_scale):
    if modelo == "Texto":
        from diffusers import StableDiffusionPipeline
        import torch
        pipe = StableDiffusionPipeline.from_pretrained(
            model_id,
            #revision="fp16" if torch.cuda.is_available() else "fp32",
            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
            requires_safety_checker=False
        ).to("cpu")
        # Other possible arguments: guidance_scale=..., num_inference_steps=..., callback=pipe_callback
        image_pipe = pipe(prompt, negative_prompt=prompt_negativo, width=728, height=728)
        imagen = image_pipe.images[0]
        return imagen
    elif modelo == "Imagen":
        from diffusers import StableDiffusionImg2ImgPipeline
        from PIL import Image
        import torch
        uploaded_file = Image.fromarray(uploaded_file)
        pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            #revision="fp16" if torch.cuda.is_available() else "fp32",
            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
            requires_safety_checker=False
        ).to("cpu")
        # Fall back to the usual defaults (strength=0.8, guidance_scale=7.5) when the fields are empty or 0
        my_strength = 0.8 if my_strength in (0, None) else my_strength
        my_guidance_scale = 7.5 if my_guidance_scale in (0, None) else my_guidance_scale
        imagen = pipe(prompt, image=uploaded_file, negative_prompt=prompt_negativo, strength=my_strength, guidance_scale=my_guidance_scale).images[0]
        return imagen
    else:
        raise gr.Error("Te has olvidado de marcar una opción")
demo = gr.Interface(
    fn=ia_imagenes,
    inputs=[
        gr.Radio(["Texto", "Imagen"], value="Texto"),
        "text",
        "text",
        "image",
        "number",
        "number"
    ],
    outputs="image",
    title="Creación de Imágenes 🖼️",
)
demo.launch(show_error=True, auth=(USUARIO, USUARIO), share=False)
'''
TODO:
- Button to download the generated image
- Show how long generation takes
'''
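# A rough sketch (an assumption, not the author's implementation) of the two TODO items above:
# wrap `ia_imagenes` so it also returns the elapsed time, and expose that as a second output.
# Recent Gradio versions already show a download button on output gr.Image components, so the
# download item mostly needs a recent Gradio release. The wrapper name below is illustrative.
#
# import time
#
# def ia_imagenes_con_tiempo(*args):
#     inicio = time.perf_counter()
#     imagen = ia_imagenes(*args)
#     return imagen, f"Tardó {time.perf_counter() - inicio:.1f} s"
#
# demo = gr.Interface(
#     fn=ia_imagenes_con_tiempo,
#     inputs=[gr.Radio(["Texto", "Imagen"], value="Texto"), "text", "text", "image", "number", "number"],
#     outputs=["image", "text"],
#     title="Creación de Imágenes 🖼️",
# )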