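"""Gradio demo for cell instance segmentation with MultiStreamCellSegModel.

The app loads pretrained weights from the Lewislou/cellseg_sribd Hugging Face
repository together with a local `model.pt` checkpoint, percentile-normalizes
the uploaded image, runs the model, and displays the preprocessed image next
to the predicted instance-segmentation overlay.
"""
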
import gradio as gr
import numpy as np
import tifffile as tif
import torch
from skimage import io, exposure

from overlay import visualize_instances_map
from sribd_cellseg_models import MultiStreamCellSegModel, ModelConfig


def normalize_channel(img, lower=1, upper=99):
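    """Rescale a single image channel to uint8 using its intensity percentiles.

    Percentiles are computed over non-zero pixels only; if the percentile
    window is (nearly) degenerate, the channel is returned unchanged apart
    from the uint8 cast.
    """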
    non_zero_vals = img[np.nonzero(img)]
    percentiles = np.percentile(non_zero_vals, [lower, upper])
    if percentiles[1] - percentiles[0] > 0.001:
        img_norm = exposure.rescale_intensity(img, in_range=(percentiles[0], percentiles[1]), out_range='uint8')
    else:
        img_norm = img
    return img_norm.astype(np.uint8)

def predict(img_name, model=None, device=None, reduce_labels=True):
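    """Segment the image at `img_name` and return (preprocessed image, overlay).

    The second Gradio input (the model-name textbox) arrives as `model`; it and
    the remaining keyword arguments are currently unused.
    """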
    if img_name.endswith('.tif') or img_name.endswith('.tiff'):
        img_data = tif.imread(img_name)
    else:
        img_data = io.imread(img_name)
    # Ensure the image has exactly three channels
    # (grayscale -> RGB, extra channels dropped).
    if len(img_data.shape) == 2:
        img_data = np.repeat(np.expand_dims(img_data, axis=-1), 3, axis=-1)
    elif len(img_data.shape) == 3 and img_data.shape[-1] > 3:
        img_data = img_data[:, :, :3]
    # Percentile-normalize each non-empty channel to uint8.
    pre_img_data = np.zeros(img_data.shape, dtype=np.uint8)
    for i in range(3):
        img_channel_i = img_data[:, :, i]
        if len(img_channel_i[np.nonzero(img_channel_i)]) > 0:
            pre_img_data[:, :, i] = normalize_channel(img_channel_i, lower=1, upper=99)

    # Load the pretrained model from the Hugging Face Hub, reinitialize it with
    # the default config, then load the local checkpoint and run inference on CPU.
    my_model = MultiStreamCellSegModel.from_pretrained("Lewislou/cellseg_sribd")
    checkpoints = torch.load('model.pt', map_location=torch.device('cpu'))
    my_model.__init__(ModelConfig())
    my_model.load_checkpoints(checkpoints)
    with torch.no_grad():
        output = my_model(pre_img_data)

    # Draw the predicted instance map on top of the preprocessed image.
    overlay = visualize_instances_map(pre_img_data, output)
    return pre_img_data, overlay


# Build the Gradio UI: an image upload plus a model-name textbox as inputs,
# and the preprocessed image and predicted overlay as outputs.
gr.Interface(
    predict,
    inputs=[gr.Image(label="Upload Input Image", type="filepath"),
            gr.Textbox(label="Model Name", value="sribd_med", max_lines=1)],
    outputs=[gr.Image(label="Processed Image"),
             gr.Image(label="Predicted Overlay")],
    title="Cell Segmentation Results",
).launch()