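"""Gradio app for background removal with BiRefNet.

Three tabs share one segmentation pipeline: a single uploaded image, an image
fetched from a URL, and a batch of files that is returned as a zip archive.
Each result is saved under ``output_images/`` and shown next to the original.
"""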
import os
import zipfile

import gradio as gr
import spaces
import torch
from gradio_imageslider import ImageSlider
from loadimg import load_img
from torchvision import transforms
from transformers import AutoModelForImageSegmentation

torch.set_float32_matmul_precision("high")

# Load BiRefNet once at startup and keep it on the best available device.
device = "cuda" if torch.cuda.is_available() else "cpu"

birefnet = AutoModelForImageSegmentation.from_pretrained(
    "ZhengPeng7/BiRefNet", trust_remote_code=True
)
birefnet.to(device)
birefnet.eval()

# Preprocessing: fixed 1024x1024 input normalized with ImageNet statistics.
transform_image = transforms.Compose(
    [
        transforms.Resize((1024, 1024)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ]
)

# All outputs are written below this directory.
os.makedirs("output_images", exist_ok=True)

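# Single-image tab: segment one uploaded image and return (processed, original)
# so the ImageSlider output can compare them.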
@spaces.GPU
def fn(image):
    im = load_img(image, output_type="pil")
    im = im.convert("RGB")
    image_size = im.size
    origin = im.copy()
    input_images = transform_image(im).unsqueeze(0).to(device)

    # Predict the foreground mask and apply it at the original resolution.
    with torch.no_grad():
        preds = birefnet(input_images)[-1].sigmoid().cpu()
    pred = preds[0].squeeze()
    pred_pil = transforms.ToPILImage()(pred)
    mask = pred_pil.resize(image_size)
    im.putalpha(mask)

    output_file_path = os.path.join("output_images", "output_image_single.png")
    im.save(output_file_path)

    return (im, origin)

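# URL tab: identical pipeline, but the image is first fetched from a URL string.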
@spaces.GPU
def fn_url(url):
    im = load_img(url, output_type="pil")
    im = im.convert("RGB")
    origin = im.copy()
    image_size = im.size
    input_images = transform_image(im).unsqueeze(0).to(device)

    with torch.no_grad():
        preds = birefnet(input_images)[-1].sigmoid().cpu()
    pred = preds[0].squeeze()
    pred_pil = transforms.ToPILImage()(pred)
    mask = pred_pil.resize(image_size)
    im.putalpha(mask)

    output_file_path = os.path.join("output_images", "output_image_url.png")
    im.save(output_file_path)

    return [im, origin]

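# Batch tab: run the pipeline over every uploaded file and bundle the
# resulting PNGs into a single zip archive for download.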
@spaces.GPU
def batch_fn(images):
    output_paths = []
    for idx, image_path in enumerate(images):
        im = load_img(image_path, output_type="pil")
        im = im.convert("RGB")
        image_size = im.size
        input_images = transform_image(im).unsqueeze(0).to(device)

        with torch.no_grad():
            preds = birefnet(input_images)[-1].sigmoid().cpu()
        pred = preds[0].squeeze()
        pred_pil = transforms.ToPILImage()(pred)
        mask = pred_pil.resize(image_size)
        im.putalpha(mask)

        output_file_path = os.path.join("output_images", f"output_image_batch_{idx + 1}.png")
        im.save(output_file_path)
        output_paths.append(output_file_path)

    # Collect all processed images into one downloadable archive.
    zip_file_path = os.path.join("output_images", "processed_images.zip")
    with zipfile.ZipFile(zip_file_path, "w") as zipf:
        for file in output_paths:
            zipf.write(file, os.path.basename(file))

    return zip_file_path

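# Input/output components shared by the three gr.Interface tabs below.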
batch_image = gr.File(label="Upload multiple images", type="filepath", file_count="multiple")

slider1 = ImageSlider(label="Processed Image", type="pil")
slider2 = ImageSlider(label="Processed Image from URL", type="pil")
image = gr.Image(label="Upload an image")
text = gr.Textbox(label="Paste an image URL")

chameleon = load_img("chameleon.jpg", output_type="pil")
url = "https://hips.hearstapps.com/hmg-prod/images/gettyimages-1229892983-square.jpg"

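# One gr.Interface per workflow; they are combined into a TabbedInterface below.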
tab1 = gr.Interface(
    fn, inputs=image, outputs=slider1, examples=[chameleon], api_name="image"
)
tab2 = gr.Interface(fn_url, inputs=text, outputs=slider2, examples=[url], api_name="text")
tab3 = gr.Interface(
    batch_fn,
    inputs=batch_image,
    outputs=gr.File(label="Download Processed Files"),
    api_name="batch",
    css="""
    #component-37 {
        display: none;
    }
    """,
)

demo = gr.TabbedInterface(
    [tab1, tab2, tab3], ["image", "text", "batch"], title="Multi BiRefNet for Background Removal"
)

if __name__ == "__main__":
    demo.launch()