ItchyFingaz committed
Commit ea9668d
1 Parent(s): 375333c

Update custom_node_furniture_mask.py

Files changed (1):
  custom_node_furniture_mask.py  +24 -38
custom_node_furniture_mask.py CHANGED
@@ -1,61 +1,47 @@
 # custom_node_furniture_mask.py by StyleSpace (and GPT4)
-
-# custom_node_furniture_mask.py
 import torch
-import numpy as np
-from PIL import Image
 import torchvision.transforms as T
 from torchvision.models.segmentation import deeplabv3_resnet50
 
-class FurnitureMask:
+class FurnitureMaskNode:
     def __init__(self):
-        self.segmentation_model = deeplabv3_resnet50(pretrained=True, progress=False, num_classes=150).eval()
+        self.model = deeplabv3_resnet50(pretrained=True).eval()
+        self.transforms = T.Compose([
+            T.Resize(256),
+            T.CenterCrop(224),
+            T.ToTensor(),
+            T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
+        ])
 
     @classmethod
     def INPUT_TYPES(cls):
         return {
             "required": {
-                "image": ("IMAGE",),
+                "input_image": ("IMAGE",),
             },
         }
 
-    RETURN_TYPES = {
-        "latent": "LATENT",
-        "mask": "MASK",
-    }
-    FUNCTION = "generate_mask"
-
-    CATEGORY = "masking"
-
-    def generate_mask(self, image):
-        pil_image = self.tensor2pil(image)
+    RETURN_TYPES = ("IMAGE", "MASK")
+    FUNCTION = "detect_furniture"
 
-        furniture_classes = [20, 33, 63, 84, 85, 87, 88, 89, 91, 96, 97, 98, 100, 102, 104, 105, 106, 107, 109, 112, 113, 115, 116, 117, 118, 120, 121, 122, 123, 124, 126, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152]
-
-        preprocess = T.Compose([
-            T.Resize(256),
-            T.CenterCrop(224),
-            T.ToTensor(),
-            T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
-        ])
-
-        input_tensor = preprocess(pil_image).unsqueeze(0)
+    CATEGORY = "custom"
 
+    def detect_furniture(self, input_image):
+        input_tensor = self.transforms(input_image).unsqueeze(0)
         with torch.no_grad():
-            output = self.segmentation_model(input_tensor)['out'][0]
-            predicted = output.argmax(0)
-
-        mask = torch.zeros_like(predicted).bool()
-        for cls in furniture_classes:
-            mask |= (predicted == cls)
+            output = self.model(input_tensor)['out'][0]
+            output_predictions = output.argmax(0)
 
-        mask = mask.unsqueeze(0).unsqueeze(0).float()
+        non_furniture_classes = [0, 1, 2, 3, 4, 5, 6, 9, 10, 14, 15, 16, 18, 20]
+        mask = torch.zeros_like(output_predictions, dtype=torch.bool)
+        for cls in non_furniture_classes:
+            mask |= (output_predictions == cls)
 
-        return {"latent": image, "mask": mask}
+        mask = ~mask
+        masked_image = input_image * mask.unsqueeze(-1).float()
 
-    def tensor2pil(self, image):
-        return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))
+        return masked_image, mask
 
 NODE_CLASS_MAPPINGS = {
-    "Furniture Mask": FurnitureMask
+    "FurnitureMask": FurnitureMaskNode
 }
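
For readers who want to try the new node's logic outside ComfyUI, here is a minimal standalone sketch of the same furniture-masking idea. It is not part of the commit and rests on two assumptions the diff does not state: that IMAGE tensors follow ComfyUI's [batch, height, width, channels] float-in-[0, 1] layout, and that torchvision's deeplabv3_resnet50 predicts the standard 21-class Pascal VOC label set, in which chair (9), diningtable (11) and sofa (18) are the closest furniture categories. The committed detect_furniture feeds the raw IMAGE tensor into Resize/CenterCrop/ToTensor transforms that are written for a PIL image, so some layout conversion is needed in practice; the sketch handles that by permuting to channel-first, resizing, and scaling the predicted mask back to the input resolution.

# --- standalone sketch, not part of the commit ---
import torch
import torch.nn.functional as F
import torchvision.transforms as T
from torchvision.models.segmentation import deeplabv3_resnet50

# Assumed Pascal VOC indices for furniture-like classes: chair, diningtable, sofa.
FURNITURE_CLASSES = (9, 11, 18)

# pretrained=True mirrors the diff; newer torchvision prefers the weights= argument.
model = deeplabv3_resnet50(pretrained=True).eval()
normalize = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

def furniture_mask(image):
    """image: assumed ComfyUI-style float tensor [B, H, W, C] in [0, 1].
    Returns (masked_image [B, H, W, C], mask [B, H, W])."""
    _, h, w, _ = image.shape
    # Channel-last -> channel-first, resized to DeepLabV3's nominal 520x520 input.
    x = image.permute(0, 3, 1, 2)
    x = F.interpolate(x, size=(520, 520), mode="bilinear", align_corners=False)
    x = normalize(x)

    with torch.no_grad():
        logits = model(x)["out"]        # [B, 21, 520, 520]
    labels = logits.argmax(1)           # [B, 520, 520], class index per pixel

    # Union of the furniture classes, then scale back to the input resolution.
    mask = torch.zeros_like(labels, dtype=torch.bool)
    for cls in FURNITURE_CLASSES:
        mask |= labels == cls
    mask = F.interpolate(mask.float().unsqueeze(1), size=(h, w), mode="nearest").squeeze(1)

    masked_image = image * mask.unsqueeze(-1)
    return masked_image, mask

if __name__ == "__main__":
    dummy = torch.rand(1, 512, 512, 3)  # stand-in for a ComfyUI IMAGE batch
    img, m = furniture_mask(dummy)
    print(img.shape, m.shape)           # torch.Size([1, 512, 512, 3]) torch.Size([1, 512, 512])

A node built on this sketch would load the model once in __init__, as the committed class does, and return the mask in whatever MASK layout the host application expects.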