File size: 12,252 Bytes
bfcf15b
397a6e0
0b0fbff
c7b7c38
061c485
c7b7c38
f786d0a
bc77755
 
bfcf15b
d8cca02
1b41e67
f786d0a
 
7b20888
061c485
5fd6610
061c485
7ea4256
891de3b
5fd6610
9354b89
061c485
5fd6610
9354b89
061c485
 
 
7b20888
ec71e7b
c7b7c38
31a6cfe
 
891de3b
a72f193
652ad4e
e9da226
c7b7c38
c1b7442
c7b7c38
 
755678e
c7b7c38
0c79f97
 
c7b7c38
 
 
937a2b5
ec71e7b
 
 
 
 
3927601
c7b7c38
 
 
 
c9c7d64
 
 
c7b7c38
0f9a2e9
59d9fe4
31a6cfe
c7b7c38
 
3927601
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d6eb204
3927601
609ca1e
3927601
609ca1e
 
 
 
3927601
 
 
609ca1e
 
 
 
3927601
 
 
 
6ddc519
 
 
3927601
 
 
 
 
 
 
 
 
7b20888
1b41e67
c7b7c38
72899e4
b4e0584
1b41e67
 
c7b7c38
 
1b41e67
31a6cfe
c7b7c38
1b41e67
3927601
937a2b5
 
 
3927601
7a7e782
c1b7442
297f270
c5b9d01
1b41e67
f5ae7ba
 
 
 
 
6ac0b94
f5ae7ba
6ac0b94
f5ae7ba
7ff8c52
04f82a3
 
 
 
3927601
5fd6610
297f270
dc3e688
5f33f15
c1b7442
c5b9d01
73979a5
36082ac
73979a5
 
 
 
 
 
 
285f9db
73979a5
 
 
 
 
36082ac
4a242d2
 
 
 
73979a5
c5b9d01
1b41e67
6e18f3b
c988e88
79c0151
842b426
d7f078e
79c0151
d7f078e
fcbbd24
79c0151
6335c21
4a9f8e4
6335c21
 
 
 
 
 
 
 
 
 
 
 
 
 
d7f078e
6e18f3b
842b426
6e18f3b
79c0151
6e18f3b
285f9db
046a6e4
 
bc77755
6a2cf84
7df4bba
 
6a2cf84
7b20888
 
7df4bba
bc77755
4a9f8e4
 
6335c21
4a9f8e4
 
 
 
 
6335c21
 
13846d5
6335c21
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7df4bba
 
 
 
4ae9b2a
36082ac
4ae9b2a
d6c4df5
1864ada
b9d72c3
4ae9b2a
 
 
7df4bba
36082ac
7df4bba
13846d5
7df4bba
 
 
d7f078e
1b41e67
 
 
 
 
 
 
 
 
 
 
 
 
c5b9d01
 
1b41e67
 
 
 
 
297f270
609ca1e
d8cca02
 
 
1b41e67
 
 
 
 
 
 
 
 
 
76d3c68
686fa94
1b41e67
04f82a3
7d7b9e1
8604ab8
6ac0b94
04f82a3
1b41e67
 
 
 
3521162
f5ae7ba
04f82a3
c5b9d01
73979a5
36082ac
937a2b5
 
0625694
714a351
1b41e67
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
import gradio as gr
import tifffile as tiff
import numpy as np
import os
import cv2
from PIL import Image 
import tensorflow as tf
from skimage.feature import peak_local_max


# 96x128 placeholder frames for the AnnotatedImage widgets:
# fp0 is all-black, fp1 is a uniform gray at intensity 200.
fp0 = np.zeros((96, 128), dtype=np.uint8)
fp1 = np.full((96, 128), 200, dtype=np.uint8)

# generic image reader
def imread(filepath):
    """Read an image from disk.

    TIFF files go through tifffile (preserves bit depth and extra planes);
    everything else is read with OpenCV.

    Parameters
    ----------
    filepath : str
        Path to the image file.

    Returns
    -------
    np.ndarray
        The image data.

    Raises
    ------
    ValueError
        If OpenCV cannot decode the file (cv2.imread returns None).
    """
    print('imread')
    fpath, fext = os.path.splitext(filepath)

    # Compare case-insensitively so '.TIF'/'.TIFF' are also routed to tifffile
    # instead of silently falling through to cv2.
    if fext.lower() in ('.tiff', '.tif'):
        print('imread_tiff')
        img = tiff.imread(filepath)
    else:
        print('imread_cv2')
        img = cv2.imread(filepath)
        if img is None:
            # cv2.imread signals failure by returning None rather than raising.
            raise ValueError("could not read image: " + filepath)

    return img
    
# tiff volume to png slice
def tif_view(filepath, z, show_depth=True):
    """Convert one depth slice of a TIFF volume into a PNG for display.

    Non-TIFF paths are returned unchanged. For a TIFF, the selected slice is
    written to '<stem>z<z>.png' next to the input and that path is returned.

    Parameters
    ----------
    filepath : str
        Path to the image; only '.tif'/'.tiff' extensions are converted.
    z : int
        Depth index of the slice to display.
    show_depth : bool
        If True, map planes z..z+2 onto RGB; if False, show plane z as gray
        (same plane replicated into all three channels).

    Returns
    -------
    str
        Path of the PNG written (or the original path for non-TIFF input).
    """
    fpath, fext = os.path.splitext(filepath)
    print('tif'+filepath)
    print('tif'+ fext)
    if fext in ['.tiff', '.tif']:
        print('happens?')
        img = tiff.imread(filepath)
        print(img.shape)
        if img.ndim==2:
            # grayscale image: replicate into three identical channels
            img = np.tile(img[:,:,np.newaxis], [1,1,3])
        elif img.ndim==3:
            # assume the smallest axis is channels/depth and move it last
            imin = np.argmin(img.shape)
            print(imin)
            if imin<2:
                img = np.moveaxis(img, imin, 2)
                print(img.shape)
        else:
            raise ValueError("TIF cannot have more than three dimensions")

        print(z)

        if show_depth:
            # NOTE(review): near the end of the stack this slice can yield
            # fewer than 3 planes; the zero-fill below pads the missing ones.
            img = img[:, :, z:(z+3)]
        else:
            img = img[:, :, (z,z,z)]
        
        Ly, Lx, nchan = img.shape
        imgi = np.zeros((Ly, Lx, 3))
        nn = np.minimum(3, img.shape[-1])
        imgi[:,:,:nn] = img[:,:,:nn]

        # normalize to [0, 255]; epsilon avoids division by zero on black frames
        imgi = imgi/(np.max(imgi)+0.0000001)
        imgi = (255. * imgi)
        
        filepath = fpath+'z'+str(z)+'.png'
        tiff.imwrite(filepath, imgi.astype('uint8'))
    print('tif'+filepath)   
    return filepath

def tif_view_3D(filepath, z):
    """Render depth plane z of a 3-D/4-D/5-D TIFF volume as '<stem>.png'.

    Parameters
    ----------
    filepath : str
        Path to a '.tif'/'.tiff' file laid out as (t,)z,(c,)y,x.
    z : int
        Depth plane to extract.

    Returns
    -------
    str
        Path of the PNG written.

    Raises
    ------
    ValueError
        For non-TIFF paths, 2-D TIFFs, or more than five dimensions.
    """
    fpath, fext = os.path.splitext(filepath)
    print('tif'+filepath)
    print('tif'+ fext)

    # assumes (t,)z,(c,)y,x for now
    if fext in ['.tiff', '.tif']:
        print('happens?')
        img = tiff.imread(filepath)
        print(img.shape)
        if img.ndim==2:
            raise ValueError("TIF has only two dimensions")

        # select first timepoint
        if img.ndim==5:
            img = img[0,:,:,:,:]
            print(img.shape)

        #distinguishes between z,y,x and z,c,y,x
        if img.ndim==4:
            img = img[z,:,:,:]
            print(img.shape)
        elif img.ndim==3:
            img = img[z,:,:]
            print(img.shape)
            # grayscale plane: replicate into three channels
            img = np.tile(img[:,:,np.newaxis], [1,1,3])
        else:
            raise ValueError("TIF cannot have more than five dimensions")

        # assume the smallest remaining axis is channels and move it last
        imin = np.argmin(img.shape)
        img = np.moveaxis(img, imin, 2)
        print(img.shape)

        Ly, Lx, nchan = img.shape
        imgi = np.zeros((Ly, Lx, 3))
        nn = np.minimum(3, img.shape[-1])
        imgi[:,:,:nn] = img[:,:,:nn]

        # normalize to [0, 255]; epsilon guards an all-zero plane
        imgi = imgi/(np.max(imgi)+0.0000001)
        imgi = (255. * imgi)
        
        filepath = fpath+'.png'
        tiff.imwrite(filepath, imgi.astype('uint8'))
    else: 
        raise ValueError("not a TIF/TIFF")
        
    print('tif'+filepath)   
    return filepath

# function to change image appearance
def norm_path(filepath):
    """Rescale an image to 0-255, save it as '<stem>.png', return the new path."""
    img = imread(filepath)
    # tiny epsilon keeps the division safe for an all-zero image
    img = img / (np.max(img) + 0.0000001)
    stem, _ = os.path.splitext(filepath)
    out_path = stem + '.png'
    Image.fromarray((255. * img).astype(np.uint8)).save(out_path)
    print('norm' + out_path)
    return out_path

def update_image(filepath, z):
    """Render the most recently uploaded volume at depth z for the input panel."""
    print('update_img')
    placeholder = [((5, 5, 10, 10), 'nothing')]
    shown = norm_path(tif_view(filepath[-1], z))
    print(shown)
    print(filepath)
    # keep the upload list as-is; reset the output panel to the blank frame
    return (shown, placeholder), filepath, (fp0, placeholder)

def update_with_example(filepath):
    """Show the clicked example image and point the uploader at its .tif twin."""
    print('update_btn')
    print(filepath)
    box = [((5, 5, 10, 10), 'nothing')]
    stem, _ = os.path.splitext(filepath)
    tif_path = stem + '.tif'
    return (filepath, box), [tif_path], (fp0, box)

def example(filepath):
    """Echo the selected example path (feeds the hidden example image)."""
    print(filepath)
    return filepath

def update_button(filepath, z):
    """Render a freshly uploaded single file at depth z."""
    print('update_btn')
    print(filepath)
    box = [((5, 5, 10, 10), 'nothing')]
    shown = norm_path(tif_view(filepath, z))
    print(shown)
    # wrap the single path in a list for the multi-file upload widget
    return (shown, box), [filepath], (fp0, box)

def update_z(filepath, filepath_result, filepath_coordinates, z):
    """Re-render the input panel (and the result panel, if any) at depth z."""
    print('update_img')
    shown = norm_path(tif_view(filepath[-1], z))

    # A result path exists only after detection has run; before that the
    # download-button value is not a string, so fall back to the blank frame.
    if isinstance(filepath_result, str):
        result_shown = norm_path(tif_view(filepath_result, z, show_depth=False))
    else:
        result_shown = fp0
    print(shown)
    print(filepath)

    # Without a coordinates file there are no detections to draw.
    boxes = [] if filepath_coordinates is None else filter_coordinates(filepath_coordinates, z)

    return (shown, boxes), (result_shown, boxes)

def detect_cells(filepath, z):
    """Run the cell-position model on the last uploaded TIFF.

    Writes '<stem>result.tiff' (model output volume) and
    '<stem>coordinates.csv' (peak (z, y, x) rows from peak_local_max), and
    returns both paths plus the rendered result slice with detection boxes
    for the output AnnotatedImage.

    Parameters
    ----------
    filepath : list
        Uploaded file paths; only the last one is processed.
    z : int
        Depth plane to render in the returned preview.
    """
    # compile=False: inference only, no training configuration needed
    model = tf.keras.models.load_model('./model_positions', compile=False)

    img = tiff.imread(filepath[-1])
    
    img = img/np.max(img)
    # reps [1,1,2] on a 4-D array are left-padded with 1s by np.tile,
    # duplicating the new trailing axis into 2 channels
    img = np.tile(img[:,:,:,np.newaxis], [1,1,2])
    # add a leading batch axis
    img = img[np.newaxis,:,:,:,:]

    # NOTE(review): pad() supplies pad widths for exactly three axes while
    # img is 5-D at this point — confirm the intended shape of this call.
    img= pad(img)

    tiles = split_z(img)
    results = []
    
    for tile in tiles:
        tensor = tf.convert_to_tensor(tile)
        result = model(tensor).numpy()
        # drop the batch and channel axes of the model output
        result = result[0, :, :, :, 0]
        results.append(result)

    result = reconstruct_z(results)

    

    
    print(result.shape)
    print(filepath)
    fpath, fext = os.path.splitext(filepath[-1])
    filepath_result = fpath+'result'+'.tiff'
    
    tiff.imwrite(filepath_result, result)
    filepath_result_show = tif_view(filepath_result, z, show_depth=False)
    filepath_result_show = norm_path(filepath_result_show)

    # local maxima of the output volume are the detected cell positions
    coordinates = peak_local_max(result, min_distance=2, threshold_abs=0.2,  exclude_border=False)
    print(coordinates)
    filepath_coordinates = fpath+'coordinates'+'.csv'
    np.savetxt(filepath_coordinates, coordinates, delimiter=",")

    display_boxes = filter_coordinates(filepath_coordinates, z)

    return filepath_result, filepath_coordinates, (filepath_result_show, display_boxes)

def pad(img, z_tile = 32, xy_tile = 96):
    """Zero-pad a (z, y, x) volume so each axis is a multiple of its tile size.

    Parameters
    ----------
    img : np.ndarray
        3-D volume to pad (padding is appended at the high end of each axis).
    z_tile : int
        Tile size along axis 0.
    xy_tile : int
        Tile size along axes 1 and 2.

    Returns
    -------
    np.ndarray
        The padded volume.

    NOTE(review): np.pad below supplies exactly three (before, after) pairs,
    so img must be 3-D; detect_cells currently passes a 5-D array here —
    confirm the intended input shape.
    """
    # (-n) % tile is 0 when n is already a multiple of the tile; the previous
    # `tile - n % tile` form appended a full spurious tile in that case
    # (contradicting split_z's exact-32 passthrough).
    pad_z = (-img.shape[0]) % z_tile
    pad_y = (-img.shape[1]) % xy_tile
    pad_x = (-img.shape[2]) % xy_tile

    print(pad_x)
    return np.pad(img, ((0, pad_z), (0, pad_y), (0, pad_x)))

def split_z(img, z_tile=32, z_buffer=2):
    """Cut a volume into overlapping depth tiles of z_tile slices.

    Consecutive tiles start z_tile - z_buffer slices apart, i.e. they
    overlap by z_buffer slices.

    NOTE(review): for depths other than exactly 32 the final tile can be
    shorter than z_tile (the slice is clamped at the end of the array) —
    confirm the model and reconstruct_z's np.stack can accept that.
    """
    # a volume that is exactly one tile deep is passed through untouched
    if img.shape[0]==32:
        return([img])

    tiles = []
    height = 0

    while height<img.shape[0]:
        tiles.append(img[height:(height+z_tile), :, :])
        height = height+z_tile-z_buffer

    return tiles

def reconstruct_z(tiles, z_tile=32, z_buffer=2):
    """Reassemble per-tile model outputs produced from split_z tiles.

    Each tile is trimmed to its first z_tile - z_buffer slices (dropping
    the overlap region) before being combined.

    NOTE(review): np.stack adds a new leading axis instead of joining the
    tiles along z — np.concatenate(tiles, axis=0) looks like the intent —
    and it requires every trimmed tile to share a shape, which a short
    final tile from split_z would violate. Confirm.
    """
    # single tile: the volume was at most one tile deep, return unchanged
    if len(tiles)==1:
        return tiles[0]

    tiles = [tile[0:(z_tile-z_buffer), :, :] for tile in tiles]

    return np.stack(tiles, axis = 0)

def filter_coordinates(filepath_coordinates, z):
    """Load detected peak coordinates and build display boxes for plane z.

    Parameters
    ----------
    filepath_coordinates : str
        CSV of (z, y, x) rows written by detect_cells.
    z : int
        Currently displayed depth plane.

    Returns
    -------
    list
        [((x0, y0, x1, y1), 'nothing'), ...] boxes for gr.AnnotatedImage,
        one per peak within 2 planes of z.
    """
    coordinates = np.loadtxt(filepath_coordinates, delimiter=",")
    # np.loadtxt returns a 1-D row for a single-line CSV; force 2-D so the
    # column indexing below also works for a single detection.
    coordinates = np.atleast_2d(coordinates)
    print(coordinates)
    coordinates = coordinates[np.abs(coordinates[:, 0] - z) < 3, :]
    print(coordinates)
    # columns are (z, y, x); AnnotatedImage boxes want (x, y) ordering
    xy_coordinates = coordinates[:, (2, 1)]
    rel_z = np.abs(coordinates[:, 0] - z)
    rel_z = rel_z[:, np.newaxis]

    print(rel_z)
    # Fixed box half-size of 3 px (4 - rel_z with rel_z pinned to 1); the
    # depth-dependent size computed above is deliberately left unused.
    rel_z = 1

    boxes = np.concatenate((xy_coordinates - 4 + rel_z, xy_coordinates + 4 - rel_z), axis=1)
    # Clip at zero before the unsigned cast: for peaks near the border
    # x-3 / y-3 can be negative, which would wrap to ~4e9 under uint32.
    boxes = np.clip(boxes, 0, None).astype('uint32')
    print(boxes)
    boxes = [(tuple(box.tolist()), 'nothing') for box in boxes]

    print(boxes)
    return boxes
    
# Gradio UI: input/output panels side by side, an example gallery, and the
# event wiring that connects uploads, depth changes, and the run button.
with gr.Blocks(title = "Hello", 
               css=".gradio-container {background:purple;}") as demo:

    #filepath = ""
    with gr.Row():
        with gr.Column(scale=2):
            gr.HTML("""<div style="font-family:'Times New Roman', 'Serif'; font-size:20pt; font-weight:bold; text-align:center; color:white;">Cellpose-SAM for cellular 
            segmentation <a style="color:#cfe7fe; font-size:14pt;" href="https://www.biorxiv.org/content/10.1101/2025.04.28.651001v1" target="_blank">[paper]</a> 
            <a style="color:white; font-size:14pt;" href="https://github.com/MouseLand/cellpose" target="_blank">[github]</a>
            <a style="color:white; font-size:14pt;" href="https://www.youtube.com/watch?v=KIdYXgQemcI" target="_blank">[talk]</a>                        
            </div>""")
            gr.HTML("""<h4 style="color:white;">You may need to login/refresh for 5 minutes of free GPU compute per day (enough to process hundreds of images). </h4>""")
            
            #input_image = gr.Image(label = "Input", type = "filepath")
            # AnnotatedImage so detection boxes can be overlaid on the input
            input_image = gr.AnnotatedImage(label = "Input", show_legend=False, color_map = {'nothing': '#FFFF00'})

            with gr.Row():
                with gr.Column(scale=1):                    
                    with gr.Row():
                        resize = gr.Number(label = 'max resize', value = 1000)
                        max_iter = gr.Number(label = 'max iterations', value = 250) 
                        depth = gr.Number(label = 'z-scale', value = 10)

                    up_btn = gr.UploadButton("Multi-file upload (png, jpg, tif etc)", visible=True, file_count = "multiple")                        

                            
                    #gr.HTML("""<h4 style="color:white;"> Note2: Only the first image of a tif will display the segmentations, but you can download segmentations for all planes. </h4>""")
                    
                with gr.Column(scale=1):
                    send_btn = gr.Button("Run Cellpose-SAM")
                    down_btn = gr.DownloadButton("Download masks (TIF)", visible=False)            
                    down_btn2 = gr.DownloadButton("Download outlines (PNG)", visible=False)  
                    
        with gr.Column(scale=2):     
            #
            #output_image = gr.Image(label = "Output", type = "filepath")
            output_image = gr.AnnotatedImage(label = "Output", show_legend=False, color_map = {'nothing': '#FFFF00'})

    # build the clickable example gallery from the bundled jpegs
    sample_list = os.listdir("./gradio_examples/jpegs")
    #sample_list = [ ("./gradio_examples/jpegs/"+sample, [((5, 5, 10, 10), 'nothing')]) for sample in sample_list]
    
    print(sample_list)
    sample_list = [ "./gradio_examples/jpegs/"+sample for sample in sample_list]
    #sample_list = []
    #for j in range(23):
    #    sample_list.append("samples/img%0.2d.png"%j)
        
    #gr.Examples(sample_list, fn = update_with_example, inputs=input_image, outputs =  [input_image, up_btn, output_image], examples_per_page=50, label = "Click on an example to try it")
    # hidden staging image: example clicks land here via example()
    example_image = gr.Image(visible=False, type='filepath')
    gr.Examples(sample_list, fn= example, inputs=example_image, outputs=[example_image], examples_per_page=5, label = "Click on an example to try it")
    #input_image.upload(update_button, [input_image, depth], [input_image, up_btn, output_image])
    # uploads render the last file; depth changes re-render both panels
    up_btn.upload(update_image, [up_btn, depth], [input_image, up_btn, output_image])
    depth.change(update_z, [up_btn, down_btn, down_btn2, depth], [input_image, output_image])
    #depth.change(update_depth, [up_btn, depth], depth)

    # DO NOT RENDER OUTPUT TWICE
    send_btn.click(detect_cells, [up_btn, depth], [down_btn, down_btn2, output_image]).then(update_image, [up_btn, depth], [input_image, up_btn, output_image])# flows, down_btn, down_btn2])

    #down_btn.click(download_function, None, [down_btn, down_btn2])
        
    gr.HTML("""<h4 style="color:white;"> Notes:<br> 
                    <li>you can load and process 2D, multi-channel tifs.
                    <li>the smallest dimension of a tif --> channels
                    <li>you can upload multiple files and download a zip of the segmentations
                    <li>install Cellpose-SAM locally for full functionality.
                    </h4>""")
    
                    
demo.launch()