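"""Gradio demo: breast-ultrasound image segmentation with a U-Net.

The script builds a U-Net, loads pre-trained weights from
'BreastCancerSegmentation.h5' and serves a Gradio interface that returns the
predicted mask together with the percentages of background (class 0) and
segmented (class 1) pixels.
"""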
import gradio as gr
from PIL import Image
import numpy as np
import cv2
from keras.models import Model
from keras.layers import Input, Conv2D, MaxPooling2D, Conv2DTranspose, concatenate

size = 128  # input resolution (height and width) expected by the U-Net


def conv_block(input, num_filters):
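    # Two 3x3 same-padding convolutions with ReLU: the basic U-Net block.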
    conv = Conv2D(num_filters, (3, 3), activation="relu", padding="same", kernel_initializer='he_normal')(input)
    conv = Conv2D(num_filters, (3, 3), activation="relu", padding="same", kernel_initializer='he_normal')(conv)
    return conv

def encoder_block(input, num_filters):
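    # Encoder stage: conv block followed by 2x2 max pooling. The conv output
    # is also returned so it can be used later as a skip connection.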
    conv = conv_block(input, num_filters)
    pool = MaxPooling2D((2, 2))(conv)
    return conv, pool

def decoder_block(input, skip_features, num_filters):
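    # Decoder stage: a 2x2 transposed convolution doubles the spatial size,
    # then the matching encoder features are concatenated as a skip connection.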
    uconv = Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(input)
    con = concatenate([uconv, skip_features])
    conv = conv_block(con, num_filters)
    return conv

def build_model(input_shape):
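    # Standard U-Net: four encoder stages (64-512 filters), a 1024-filter
    # bottleneck, four decoder stages and a 1x1 sigmoid output for the mask.
    # Pre-trained weights are loaded from 'BreastCancerSegmentation.h5'.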
    input_layer = Input(input_shape)
    
    s1, p1 = encoder_block(input_layer, 64)
    s2, p2 = encoder_block(p1, 128)
    s3, p3 = encoder_block(p2, 256)
    s4, p4 = encoder_block(p3, 512)

    b1 = conv_block(p4, 1024)

    d1 = decoder_block(b1, s4, 512)
    d2 = decoder_block(d1, s3, 256)
    d3 = decoder_block(d2, s2, 128)
    d4 = decoder_block(d3, s1, 64)
    
    output_layer = Conv2D(1, 1, padding="same", activation="sigmoid")(d4)
    model = Model(input_layer, output_layer, name="U-Net")
    model.load_weights('BreastCancerSegmentation.h5')
    return model
    
def preprocess_image(image, size=128):
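    # Resize to the network input size, convert RGB to grayscale and scale
    # pixel values to [0, 1].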
    image = cv2.resize(image, (size, size))
    image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    image = image / 255.
    return image

def segment(image):
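    # Gradio passes the uploaded image as an RGB numpy array; preprocess it,
    # run the U-Net and convert the predicted probability map to a PIL mask.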
    image = preprocess_image(image, size=size)
    image = np.expand_dims(image, 0)   # add batch dimension
    image = np.expand_dims(image, -1)  # add channel dimension to match the (H, W, 1) model input
    output = model.predict(image, verbose=0)
    mask_image = output[0]
    mask_image = np.squeeze(mask_image, -1)
    mask_image *= 255
    mask_image = mask_image.astype(np.uint8)
    mask_image = Image.fromarray(mask_image).convert("L")

    # Percentage of positive (non-zero) pixels in the predicted mask
    positive_pixels = np.count_nonzero(mask_image)
    total_pixels = mask_image.size[0] * mask_image.size[1]
    percentage = (positive_pixels / total_pixels) * 100

    # Split into background (class 0) and segmented (class 1) percentages
    class_0_percentage = 100 - percentage
    class_1_percentage = percentage

    return mask_image, class_0_percentage, class_1_percentage

if __name__ == "__main__":
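    # Build the U-Net, load the pre-trained weights and start the Gradio app.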
    model = build_model(input_shape=(size, size, 1))
    gr.Interface(
        fn=segment,
        inputs="image",
        outputs=[
            gr.Image(type="pil", label="Breast Cancer Mask"),
            gr.Number(label="Class 0 Percentage"),
            gr.Number(label="Class 1 Percentage")
        ],
        examples=[["benign(10).png"], ["benign(109).png"]],
        title='<h1 style="text-align: center;">Breast Cancer Ultrasound Image Segmentation! 💐 </h1>',
        description="""
        Check out this exciting advance in the field of breast cancer diagnosis and treatment!
        This is a demo of Breast Cancer Ultrasound Image Segmentation.
        Upload an image file or try one of the examples below! 🙌
        """
    ).launch(debug=True)
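
# A minimal sketch of calling the pipeline directly (assumes the weight file and
# an example image such as 'benign(10).png' are in the working directory):
#
#     model = build_model(input_shape=(size, size, 1))
#     img = np.array(Image.open("benign(10).png").convert("RGB"))
#     mask, pct_background, pct_segmented = segment(img)
#     mask.save("predicted_mask.png")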