import gradio as gr
import torch
from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline
import spaces
from PIL import Image
import numpy as np

# Load the ControlNet model and pipeline
controlnet = ControlNetModel.from_pretrained(
    "briaai/BRIA-2.2-ControlNet-Recoloring",
    torch_dtype=torch.float16
)
pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
    "briaai/BRIA-2.2",
    controlnet=controlnet,
    torch_dtype=torch.float16,
).to("cuda")
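# Optional (a suggestion, not part of the original app): if GPU memory is tight,
# diffusers pipelines support attention slicing, e.g.:
#   pipe.enable_attention_slicing()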

# Function to transform the image based on a prompt
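# Assumption: the Space runs on ZeroGPU hardware (the `spaces` import suggests it);
# the decorator below requests a GPU for each call. Remove it on a dedicated GPU.
@spaces.GPU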
def generate_image(image, prompt):
    # Prepare the image: the ControlNet is conditioned on a grayscale copy of the input
    image = image.convert("RGB")
    recoloring_image = Image.fromarray(np.array(image)).convert('L').convert('RGB')

    # Negative prompt to steer the model away from common artifacts
    negative_prompt = "Logo,Watermark,Text,Ugly,Morbid,Extra fingers,Poorly drawn hands,Mutation,Blurry,Extra limbs,Gross proportions,Missing arms,Mutated hands,Long neck,Duplicate,Mutilated,Mutilated hands,Poorly drawn face,Deformed,Bad anatomy,Cloned face,Malformed limbs,Missing legs,Too many fingers"

    # Generate the transformed image
    results = pipe(
        prompt=prompt, negative_prompt=negative_prompt, image=recoloring_image,
        controlnet_conditioning_scale=1.0, height=1024, width=1024,
    )
    return results.images[0]

# Gradio interface
description = """
Anything to Anything: a workflow by Angrypenguinpng using the Bria Recolor ControlNet. Check it out here: https://huggingface.co/briaai/BRIA-2.2-ControlNet-Recoloring
"""
with gr.Blocks() as demo:
    gr.Markdown("<h1><center>Image Transformation with Bria Recolor ControlNet</center></h1>")
    gr.Markdown(description)
    with gr.Group():
        with gr.Row():
            # type='pil' ensures the handler receives a PIL image (gr.Image defaults to a NumPy array)
            image = gr.Image(label='Upload your image', type='pil')
            prompt = gr.Textbox(label='Enter your prompt', placeholder="A portrait of a beautiful and playful ethereal singer, golden designs, highly detailed, blurry background")
        submit = gr.Button('Transform Image')
    output_image = gr.Image(label='Transformed Image')
    submit.click(fn=generate_image, inputs=[image, prompt], outputs=output_image)

demo.queue().launch()
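
# Local usage sketch (an assumption, not part of the original Space): with a CUDA GPU
# and `pip install gradio spaces torch diffusers transformers accelerate`, running
# `python app.py` starts the demo; upload an image and enter a prompt such as
# "watercolor painting, soft pastel tones" to recolor it.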