SophieDC committed on
Commit
72a1628
1 Parent(s): 993904f

Upload segmentation.py

Files changed (1)
  1. segmentation.py +72 -0
segmentation.py ADDED
@@ -0,0 +1,72 @@
+ # Import libraries
+
+ import cv2
+ from tensorflow import keras
+ import numpy as np
+ import matplotlib.pyplot as plt
+ from PIL import Image
+ import segmentation_models as sm
+
+ def get_mask(image):
+     model_path = "Segmentation/model_checkpoint.h5"
+     CLASSES = ['sofa']
+     BACKBONE = 'resnet50'
+
+     # define network parameters
+     n_classes = 1 if len(CLASSES) == 1 else (len(CLASSES) + 1)  # binary vs. multiclass segmentation
+     activation = 'sigmoid' if n_classes == 1 else 'softmax'
+     preprocess_input = sm.get_preprocessing(BACKBONE)  # note: defined but not applied to the input below
+     sm.set_framework('tf.keras')
+     LR = 0.0001
+
+     # create model architecture
+     model = sm.Unet(BACKBONE, classes=n_classes, activation=activation)
+     # define optimizer
+     optim = keras.optimizers.Adam(LR)
+     # segmentation_models losses can be combined with '+' and scaled by an integer or float factor
+     dice_loss = sm.losses.DiceLoss()
+     focal_loss = sm.losses.BinaryFocalLoss() if n_classes == 1 else sm.losses.CategoricalFocalLoss()
+     total_loss = dice_loss + (1 * focal_loss)
+     # total_loss can also be imported directly from the library; the lines above just show how losses are combined
+     # total_loss = sm.losses.binary_focal_dice_loss  # or sm.losses.categorical_focal_dice_loss
+     metrics = [sm.metrics.IOUScore(threshold=0.5), sm.metrics.FScore(threshold=0.5)]
+     # compile the keras model with the defined optimizer, loss and metrics
+     model.compile(optim, total_loss, metrics)
+
+     # load trained weights
+     model.load_weights(model_path)
+
+     # prepare the input: resize to the network input size and convert RGB -> BGR
+     test_img = np.array(image)  # cv2.imread(path, cv2.IMREAD_COLOR)
+     test_img = cv2.resize(test_img, (640, 640))
+     test_img = cv2.cvtColor(test_img, cv2.COLOR_RGB2BGR)
+     test_img = np.expand_dims(test_img, axis=0)
+
+     # predict, binarize the sofa mask and save it as a grayscale image
+     prediction = model.predict(test_img).round()
+     mask = Image.fromarray(prediction[..., 0].squeeze() * 255).convert("L")
+     mask.save("masks/sofa.jpg")
+     return np.array(mask)
+
+ def replace_sofa(image, mask, styled_sofa):
+     # mask must be single-channel; if it was re-read with cv2.imread, convert it first:
+     # mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
+     image = np.array(image)
+     styled_sofa = cv2.cvtColor(styled_sofa, cv2.COLOR_BGR2RGB)
+
+     # binarize the mask, cut the sofa region out of the original image
+     # and paste the styled sofa into that region
+     _, mask = cv2.threshold(mask, 10, 255, cv2.THRESH_BINARY)
+     mask_inv = cv2.bitwise_not(mask)
+     image_bg = cv2.bitwise_and(image, image, mask=mask_inv)
+     sofa_fg = cv2.bitwise_and(styled_sofa, styled_sofa, mask=mask)
+     new_image = cv2.add(image_bg, sofa_fg)
+     return new_image
+
+ # image = cv2.imread('input/sofa.jpg')
+ # mask = cv2.imread('masks/sofa.jpg')
+ # styled_sofa = cv2.imread('output/sofa_stylized_style.jpg')
+
+ # # get_mask(image)
+
+ # plt.imshow(replace_sofa(image, mask, styled_sofa))
+ # plt.show()
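
For reference, a minimal sketch of how the two functions might be chained end to end; the paths are illustrative (they mirror the commented-out example above), the import of segmentation.py and the presence of the checkpoint and masks/ directory are assumptions, and the 640x640 resizes match the size get_mask uses internally.

# Usage sketch (illustrative paths; assumes segmentation.py is importable and the checkpoint exists)
from PIL import Image
import cv2
import matplotlib.pyplot as plt
from segmentation import get_mask, replace_sofa

image = Image.open('input/sofa.jpg').resize((640, 640))   # room photo, resized to match the 640x640 mask
styled_sofa = cv2.resize(cv2.imread('output/sofa_stylized_style.jpg'), (640, 640))  # stylized sofa (BGR)
mask = get_mask(image)                                     # single-channel uint8 sofa mask
plt.imshow(replace_sofa(image, mask, styled_sofa))         # composite the styled sofa back into the room
plt.show()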