# HuggingFace Space: caption an image with BLIP, then re-render the caption
# with Stable Diffusion ("image-to-image via text").
import gradio
import torch
from diffusers import StableDiffusionPipeline
#import requests
#from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration
# Load the Stable Diffusion pipeline once at import time. It stays on CPU
# here; the ".to('cuda')" was deliberately commented out so the Space can
# start on CPU-only hardware.
pipe = StableDiffusionPipeline.from_pretrained("ByteDance/sd2.1-base-zsnr-laionaes6")#.to("cuda")

# BLIP image-captioning model: produces a text description of an input image.
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
def img2prompt(img):
    """Caption *img* with the module-level BLIP model and return the text.

    Parameters
    ----------
    img : PIL.Image.Image
        Input image; converted to RGB first because BLIP expects 3 channels.

    Returns
    -------
    str
        The generated caption, with special tokens stripped.
    """
    raw_image = img.convert("RGB")
    inputs = processor(raw_image, return_tensors="pt")
    out = model.generate(**inputs)
    output = processor.decode(out[0], skip_special_tokens=True)
    return output
def prompt2img(prompt):
    """Render *prompt* with Stable Diffusion and return the first image.

    Fixes over the original:
    - The original re-loaded the entire StableDiffusionPipeline from
      ``from_pretrained`` on EVERY call, even though a module-level ``pipe``
      already exists — reuse that instead.
    - The original called ``.to("cuda")`` unconditionally, which raises on
      CPU-only hosts; move to GPU only when CUDA is actually available.

    Parameters
    ----------
    prompt : str
        Text prompt to render.

    Returns
    -------
    PIL.Image.Image
        The first generated image.
    """
    sd_pipe = pipe.to("cuda") if torch.cuda.is_available() else pipe
    # guidance_rescale=0.7 kept from the original call (tuned for this
    # zero-SNR checkpoint — TODO confirm against the model card).
    img = sd_pipe(prompt, guidance_scale=7.5, guidance_rescale=0.7).images[0]
    return img
def wow_img2img(img):
    """Round-trip an image through text: caption it, then re-render the caption.

    Parameters
    ----------
    img : PIL.Image.Image
        Source image.

    Returns
    -------
    PIL.Image.Image
        A new image generated from the BLIP caption of *img*.
    """
    return prompt2img(img2prompt(img))
# Gradio UI: one PIL image in, one PIL image out.
app = gradio.Interface(
    fn=wow_img2img,
    inputs=gradio.Image(type="pil"),
    outputs=gradio.Image(type="pil"),
)
# share=True additionally exposes a temporary public tunnel URL.
app.launch(share=True)