import gradio as gr
import torch
import random
import numpy as np
from scipy.spatial import Delaunay
from transformers import MaskFormerFeatureExtractor, MaskFormerForInstanceSegmentation

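# Gradio demo: semantic segmentation with MaskFormer (Swin-Tiny, fine-tuned on
# ADE20K), run on CPU. The predicted label map is colorized and, in addition,
# a Delaunay triangulation is computed over the detected floor pixels.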
device = torch.device("cpu")
model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-tiny-ade").to(device)
model.eval()
preprocessor = MaskFormerFeatureExtractor.from_pretrained("facebook/maskformer-swin-tiny-ade")

def visualize_instance_seg_mask(mask):
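    """Colorize a (H, W) label map with one random color per class id.

    Pixel coordinates for the wall, floor, and window classes are collected,
    and a Delaunay triangulation is built over the floor pixels.
    """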
    print(mask)
    print(mask.shape)
    image = np.zeros((mask.shape[0], mask.shape[1], 3))
    labels = np.unique(mask)
    print("================unquie labels")
    wall=[]
    floor=[]
    window=[]
    other=[]
    label2color = {label: (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)) for label in labels}
    for i in range(image.shape[0]):
        for j in range(image.shape[1]):
            # ADE20K class ids: 0 = wall, 3 = floor, 8 = window
            if mask[i, j] == 0:
                wall.append([i, j])
            elif mask[i, j] == 3:
                floor.append([i, j])
            elif mask[i, j] == 8:
                window.append([i, j])
            else:
                other.append([i, j])

            image[i, j, :] = label2color[mask[i, j]]

    # Build 3D vertices (z = 0) from the floor pixels; the y-axis is flipped.
    floor_vertices = np.array([[x, -y, 0] for x, y in floor])
    unique_vertices = np.unique(floor_vertices, axis=0)

    # Delaunay triangulation needs at least three non-degenerate points;
    # triangulate on the x/y coordinates only.
    if len(unique_vertices) >= 3:
        tri = Delaunay(unique_vertices[:, :2])
        indices = tri.simplices
        print(floor_vertices)
        print(indices)
    
    # Normalize to [0, 1] for display.
    image = image / 255
    return image

def query_image(img):
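    """Segment the uploaded image with MaskFormer and return a colorized label map."""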
    target_size = (img.shape[0], img.shape[1])
    inputs = preprocessor(images=img, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    outputs.class_queries_logits = outputs.class_queries_logits.cpu()
    outputs.masks_queries_logits = outputs.masks_queries_logits.cpu()
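    # post_process_segmentation is the API from older transformers releases;
    # newer versions provide post_process_semantic_segmentation instead.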
    results = preprocessor.post_process_segmentation(outputs=outputs, target_size=target_size)[0].cpu().detach()
    results = torch.argmax(results, dim=0).numpy()
    results = visualize_instance_seg_mask(results)
    return results

demo = gr.Interface(
    query_image, 
    inputs=[gr.Image()], 
    outputs="image",
    title="Image Segmentation Demo",
    description = "Please upload an image to see segmentation capabilities of this model",
    examples=[["work2.jpg"]]
)

demo.launch(debug=True)