import gradio as gr
import cv2
import torch
import numpy as np
from torchvision import transforms
description = "Automatically remove the image background from a profile photo. Based on a [Space by eugenesiow](https://huggingface.co/spaces/eugenesiow/remove-bg)."
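# Pipeline overview: DeepLabV3 segments the person in the photo, the prediction is
# thresholded into a binary mask, and the mask is used to composite the original
# pixels onto a fully transparent (alpha) background.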
def make_transparent_foreground(pic, mask):
    # split the image into its channels (RGB-ordered, since it comes from PIL)
    r, g, b = cv2.split(np.array(pic).astype('uint8'))
    # add an alpha channel, initialised fully opaque (255)
    a = np.ones(mask.shape, dtype='uint8') * 255
    # merge the alpha channel back to get an RGBA image
    alpha_im = cv2.merge([r, g, b, a])
    # create a fully transparent background of the same shape
    bg = np.zeros(alpha_im.shape)
    # broadcast the binary mask across all four channels
    new_mask = np.stack([mask, mask, mask, mask], axis=2)
    # keep the original pixels where the mask is set, transparent background elsewhere
    foreground = np.where(new_mask, alpha_im, bg).astype(np.uint8)
    return foreground
def remove_background(input_image):
    preprocess = transforms.Compose([
        transforms.ToTensor(),
        # ImageNet mean/std expected by torchvision's pretrained backbones
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    input_tensor = preprocess(input_image)
    input_batch = input_tensor.unsqueeze(0)  # create a mini-batch as expected by the model
    # move the input and model to GPU for speed if available
    if torch.cuda.is_available():
        input_batch = input_batch.to('cuda')
        model.to('cuda')
    with torch.no_grad():
        output = model(input_batch)['out'][0]
    output_predictions = output.argmax(0)
    # create a binary (black and white) mask of the profile foreground
    mask = output_predictions.byte().cpu().numpy()
    background = np.zeros(mask.shape)
    bin_mask = np.where(mask, 255, background).astype(np.uint8)
    foreground = make_transparent_foreground(input_image, bin_mask)
    return foreground, bin_mask
def inference(img):
    foreground, _ = remove_background(img)
    return foreground
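# Minimal local-usage sketch (commented out, not part of the Space); it assumes the
# example file downloaded below and the model defined further down have been loaded:
#   from PIL import Image
#   cutout = inference(Image.open('demis.jpg'))
#   Image.fromarray(cutout).save('demis_no_bg.png')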
# download the example portrait photos used by the demo
torch.hub.download_url_to_file('https://pbs.twimg.com/profile_images/691700243809718272/z7XZUARB_400x400.jpg',
                               'demis.jpg')
torch.hub.download_url_to_file('https://hai.stanford.edu/sites/default/files/styles/person_medium/public/2020-03/hai_1512feifei.png?itok=INFuLABp',
                               'lifeifei.png')
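# DeepLabV3 with a ResNet-101 backbone from the torchvision hub; eval() puts the
# network in inference mode (disables dropout and batch-norm updates).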
model = torch.hub.load('pytorch/vision:v0.6.0', 'deeplabv3_resnet101', pretrained=True)
model.eval()
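# Gradio UI: a PIL image in, the background-removed image out, with the two
# downloaded photos wired up as clickable examples.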
gr.Interface(
    inference,
    gr.Image(type="pil", label="Input"),
    gr.Image(type="pil", label="Output"),
    description=description,
    examples=[['demis.jpg'], ['lifeifei.png']],
    enable_queue=True,
    css=".footer{display:none !important}"
).launch(debug=False)