Eugene Siow committed
Commit a211e78
1 Parent(s): 06e8cb7

Initial commit.

Files changed (5)
  1. .gitignore +7 -0
  2. README.md +2 -2
  3. app.py +78 -0
  4. packages.txt +4 -0
  5. requirements.txt +0 -0
.gitignore ADDED
@@ -0,0 +1,7 @@
+ .idea/
+ venv/
+ tmp/
+ *.bmp
+ *.jpg
+ *.png
+ *.db
README.md CHANGED
@@ -1,8 +1,8 @@
  ---
  title: Remove Bg
- emoji: 👁
+ emoji: 🖼️
  colorFrom: blue
- colorTo: indigo
+ colorTo: red
  sdk: gradio
  app_file: app.py
  pinned: false
app.py ADDED
@@ -0,0 +1,78 @@
+ import gradio as gr
+ import cv2
+ import torch
+ import numpy as np
+ from torchvision import transforms
+
+ title = "Remove Bg"
+ description = "Automatically remove the image background from a profile photo."
+ article = "<p style='text-align: center'><a href='https://github.com/eugenesiow/practical-ml'>Github Repo</a>"
+
+
+ def make_transparent_foreground(pic, mask):
+     # split the image into channels
+     b, g, r = cv2.split(np.array(pic).astype('uint8'))
+     # add an alpha channel and fill it with fully opaque pixels (255)
+     a = np.ones(mask.shape, dtype='uint8') * 255
+     # merge the alpha channel back in
+     alpha_im = cv2.merge([b, g, r, a], 4)
+     # create a transparent background
+     bg = np.zeros(alpha_im.shape)
+     # set up a four-channel mask
+     new_mask = np.stack([mask, mask, mask, mask], axis=2)
+     # copy only the foreground pixels from the original image where the mask is set
+     foreground = np.where(new_mask, alpha_im, bg).astype(np.uint8)
+
+     return foreground
+
+
+ def remove_background(input_image):
+     preprocess = transforms.Compose([
+         transforms.ToTensor(),
+         transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
+     ])
+
+     input_tensor = preprocess(input_image)
+     input_batch = input_tensor.unsqueeze(0)  # create a mini-batch as expected by the model
+
+     # move the input and model to GPU for speed if available
+     if torch.cuda.is_available():
+         input_batch = input_batch.to('cuda')
+         model.to('cuda')
+
+     with torch.no_grad():
+         output = model(input_batch)['out'][0]
+     output_predictions = output.argmax(0)
+
+     # create a binary (black and white) mask of the profile foreground
+     mask = output_predictions.byte().cpu().numpy()
+     background = np.zeros(mask.shape)
+     bin_mask = np.where(mask, 255, background).astype(np.uint8)
+
+     foreground = make_transparent_foreground(input_image, bin_mask)
+
+     return foreground, bin_mask
+
+
+ def inference(img):
+     foreground, _ = remove_background(img)
+     return foreground
+
+
+ torch.hub.download_url_to_file('https://pbs.twimg.com/profile_images/691700243809718272/z7XZUARB_400x400.jpg',
+                                'demis.jpg')
+ torch.hub.download_url_to_file('https://hai.stanford.edu/sites/default/files/styles/person_medium/public/2020-03/hai_1512feifei.png?itok=INFuLABp',
+                                'lifeifei.png')
+ model = torch.hub.load('pytorch/vision:v0.6.0', 'deeplabv3_resnet101', pretrained=True)
+ model.eval()
+
+ gr.Interface(
+     inference,
+     gr.inputs.Image(type="pil", label="Input"),
+     gr.outputs.Image(type="pil", label="Output"),
+     title=title,
+     description=description,
+     article=article,
+     examples=[['demis.jpg'], ['lifeifei.png']],
+     enable_queue=True
+ ).launch(debug=False)
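
Note (not part of the commit): a minimal sketch of exercising the same pipeline outside the Gradio UI, assuming the helper functions above were factored out of app.py into a hypothetical `helpers` module that loads the DeepLabV3 model at import time but does not call `.launch()` (app.py as committed downloads the example images, loads the model and launches the interface at module level, so importing it directly would block).

# Hypothetical local driver (sketch); 'helpers' is an assumed module that
# defines remove_background() and the module-level DeepLabV3 `model` exactly
# as app.py does, minus the Gradio interface.
from PIL import Image
from helpers import remove_background

img = Image.open('portrait.jpg').convert('RGB')   # any RGB profile photo (hypothetical file)
foreground, bin_mask = remove_background(img)     # RGBA uint8 array + 0/255 foreground mask

# Save the cut-out with its alpha channel preserved, plus the raw mask.
Image.fromarray(foreground).save('foreground.png')
Image.fromarray(bin_mask).save('mask.png')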
packages.txt ADDED
@@ -0,0 +1,4 @@
+ python3-opencv
+ ffmpeg
+ libsm6
+ libxext6
requirements.txt ADDED
Binary file (48 Bytes).