charlesnchr committed
Commit: 35adcb2
Parent: 4c59c61

First version complete

.gitattributes CHANGED
@@ -29,3 +29,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.tif filter=lfs diff=lfs merge=lfs -text
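
The added rule is the line that `git lfs track "*.tif"` would append: every TIFF in the repository, including the test images added below, is now stored as a Git LFS pointer rather than a raw binary blob.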
NNfunctions.py CHANGED
@@ -246,20 +246,23 @@ def save_image(data, filename,cmap):
 
 def EvaluateModel(net,opt,stack):
 
+    outfile = datetime.datetime.utcnow().strftime('%H-%M-%S')
+    outfile = 'ML-SIM_%s' % outfile
+
     os.makedirs(opt.out, exist_ok=True)
 
     print(stack.shape)
     inputimg, widefield = prepimg(stack, opt)
 
     if opt.norm == 'convert' or 'minmax' in opt.norm or 'adapthist' in opt.norm:
-        cmap = 'magma'
+        cmap = 'viridis'
     else:
         cmap = 'gray'
 
     # skimage.io.imsave('%s_wf.png' % outfile,(255*widefield.numpy()).astype('uint8'))
     wf = (255*widefield.numpy()).astype('uint8')
     wf_upscaled = skimage.transform.rescale(wf,1.5,order=3,multichannel=False) # should ideally be done by drawing on client side, in javascript
-    # save_image(wf_upscaled,'%s_wf.png' % outfile,cmap)
+    save_image(wf_upscaled,'%s_wf.png' % outfile,cmap)
 
     # skimage.io.imsave('%s.tif' % outfile, inputimg.numpy())
 
@@ -276,12 +279,13 @@ def EvaluateModel(net,opt,stack):
     if opt.norm == 'convert':
         pil_sr_img = transforms.functional.rotate(pil_sr_img,-90)
 
-    #pil_sr_img.save('%s.png' % outfile) # true output for downloading, no LUT
+    # pil_sr_img.save('%s.png' % outfile) # true output for downloading, no LUT
     sr_img = np.array(pil_sr_img)
-    sr_img = exposure.equalize_adapthist(sr_img,clip_limit=0.01)
-    # skimage.io.imsave('%s.png' % outfile, sr_img) # true out for downloading, no LUT
+    # sr_img = exposure.equalize_adapthist(sr_img,clip_limit=0.01)
+    skimage.io.imsave('%s.png' % outfile, sr_img) # true out for downloading, no LUT
+
+    sr_img = skimage.transform.rescale(sr_img,1.5,order=3,multichannel=False) # should ideally be done by drawing on client side, in javascript
 
-    # sr_img = skimage.transform.rescale(sr_img,1.5,order=3,multichannel=False) # should ideally be done by drawing on client side, in javascript
-    # save_image(sr_img,'%s_sr.png' % outfile,cmap)
-    # return outfile + '_sr.png', outfile + '_wf.png', outfile + '.png'
-    return sr_img
+    save_image(sr_img,'%s_sr.png' % outfile,cmap)
+    return outfile + '_sr.png', outfile + '_wf.png', outfile + '.png'
+    # return wf, sr_img, outfile
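
With this change `EvaluateModel` no longer hands back the reconstructed array: it writes the colormapped wide-field projection, the colormapped reconstruction, and an unprocessed copy for download to disk under a timestamped base name, and returns the three file paths that the updated `app.py` below unpacks. A minimal sketch of just that naming and return convention (standard library only, values illustrative):

```python
import datetime

# Timestamped base name, as in the diff above; a per-call UTC timestamp keeps
# concurrent requests from overwriting each other's output files.
outfile = datetime.datetime.utcnow().strftime('%H-%M-%S')
outfile = 'ML-SIM_%s' % outfile

sr_path = outfile + '_sr.png'   # colormapped super-resolved reconstruction
wf_path = outfile + '_wf.png'   # colormapped wide-field projection of the input stack
dl_path = outfile + '.png'      # reconstruction saved without a LUT, for download
print(sr_path, wf_path, dl_path)
```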
 
TestImages/MAI-SIM_membrane.tif ADDED

Git LFS Details

  • SHA256: 2bd10be3c8b868c2f0b3b3fc48e54ef3c05dc14617c4c88d240ea5645f6e96c4
  • Pointer size: 132 Bytes
  • Size of remote file: 4.76 MB
TestImages/MAI-SIM_microtubules.tif ADDED

Git LFS Details

  • SHA256: fa1a04c5e1d9982090286099c7214630eee86794327860de7ba8a99704f194f0
  • Pointer size: 132 Bytes
  • Size of remote file: 6.29 MB
TestImages/SLM-SIM_beads.tif ADDED

Git LFS Details

  • SHA256: 03f18553d5fed62323ff6b2d61c55f865cc25fb3093ea73678a6bd04b4cd47b7
  • Pointer size: 132 Bytes
  • Size of remote file: 4.73 MB
TestImages/SLM-SIM_endoplasmic.tif ADDED

Git LFS Details

  • SHA256: 2535eb96855a053b93a5ede741e6a4c97401964e67a332cc09119200d323e82f
  • Pointer size: 132 Bytes
  • Size of remote file: 6.3 MB
TestImages/Synthetic_Barbara.tif ADDED

Git LFS Details

  • SHA256: c1c8c1059ae1637077d7527cdd22338e3b310641c053c0e6d18904002b23ef53
  • Pointer size: 132 Bytes
  • Size of remote file: 6.82 MB
TestImages/Synthetic_penguin.tif ADDED

Git LFS Details

  • SHA256: ec5aa008cd9f050989bac3666945ad1d5a21bde8209634345ad0646ebfacc167
  • Pointer size: 132 Bytes
  • Size of remote file: 6.82 MB
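
These six TIFF stacks are the inputs that the updated `app.py` below exposes as examples via `glob.glob('TestImages/*')` and reloads in `process_example`.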
app.py CHANGED
@@ -11,49 +11,112 @@ import numpy as np
 from PIL import Image
 import io
 import base64
+import skimage
 from NNfunctions import *
 
 opt = GetOptions_allRnd_0317()
 net = LoadModel(opt)
 
-def predict(image):
-    img = np.array(image)
-    img = np.concatenate((img,img,img),axis=2)
-    img = np.transpose(img, (2,0,1))
+gr.close_all()
+
+def predict(imagefile):
+    # img = np.array(skimage.io.imread(imagefile.name))
+    # img = np.concatenate((img,img,img),axis=2)
+    # img = np.transpose(img, (2,0,1))
+
+    img = skimage.io.imread(imagefile.name)
 
     # sr,wf,out = EvaluateModel(net,opt,img,outfile)
-    sr_img = EvaluateModel(net,opt,img)
+    sr, wf, sr_download = EvaluateModel(net,opt,img)
+
+    return wf, sr, sr_download
+
+def process_example(filename):
+    basename = os.path.basename(filename)
+    basename = basename.replace('.png','.tif')
+    img = skimage.io.imread('TestImages/%s' % basename)
 
-    return sr_img
+    sr, wf, sr_download = EvaluateModel(net,opt,img)
 
+    return wf, sr
 
 title = '<h1 style="text-align: center;">ML-SIM: Reconstruction of SIM images with deep learning</h1>'
 
 description = """
-## About
-This space demonstrates the use of a semantic segmentation model to segment pets and classify them
-according to the pixels.
-## 🚀 To run
-Upload a pet image and hit submit or select one from the given examples
+This space demonstrates the ML-SIM method for reconstruction of structured illumination microscopy images.
+
+### <a href="https://opg.optica.org/boe/viewmedia.cfm?uri=boe-12-5-2720&html=true" target='_blank' > ML-SIM: universal reconstruction of structured illumination microscopy images using transfer learning </a>
+
+_Charles N. Christensen<sup>1,2,*</sup>, Edward N. Ward<sup>1</sup>, Meng Lu<sup>1</sup>, Pietro Lio<sup>2</sup>, Clemens F. Kaminski_</br></br>
+<sup>1</sup>University of Cambridge, Department of Chemical Engineering and Biotechnology, Laser Analytics Group</br>
+<sup>2</sup>University of Cambridge, Department of Computer Science and Technology, Artificial Intelligence Group</br>
+<sup> *</sup>**Author of this repository**:
+- GitHub: [charlesnchr](http://github.com/charlesnchr)
+- Email: charles.n.chr@gmail.com
+- Twitter: [charlesnchr](https://twitter.com/charlesnchr)
+
+---
+
+## 🔬 To run ML-SIM
+Upload a TIFF image and hit submit or select one from the examples below.
+"""
+
+article = """
+
+---
+### Read more
+- <a href='https://ML-SIM.com' target='_blank'>ML-SIM.com</a>
+- <a href='https://charles-christensen.com' target='_blank'>Website</a>
+- <a href='https://github.com/charlesnchr/ML-SIM' target='_blank'>Github</a>
+- <a href='https://opg.optica.org/boe/viewmedia.cfm?uri=boe-12-5-2720&html=true' target='_blank'>Publication</a>
 """
 
-inputs = gr.inputs.Image(label="Upload a TIFF image", type = 'pil', optional=False)
+# inputs = gr.inputs.Image(label="Upload a TIFF image", type = 'pil', optional=False)
+
+inputs = gr.inputs.File(label="Upload a TIFF image", type = 'file', optional=False)
 outputs = [
-    gr.outputs.Image(label="SIM Reconstruction")
+    gr.outputs.Image(label="INPUT (Wide-field projection)"),
+    gr.outputs.Image(label="OUTPUT (ML-SIM)"),
+    gr.outputs.File(label="Download SR image" )
     # , gr.outputs.Textbox(type="auto",label="Pet Prediction")
 ]
 
-examples = [
-    "./examples/dogcat.jpeg",
-]
-
-
+examples = glob.glob('TestImages/*')
 
 interface = gr.Interface(fn=predict,
                          inputs=inputs,
                          outputs=outputs,
                          title = title,
                          description=description,
-                         examples=examples
+                         article=article,
+                         examples=examples,
+                         allow_flagging='never'
                          )
 interface.launch()
+
+
+# with gr.Blocks() as interface:
+#     gr.Markdown(title)
+#     gr.Markdown(description)
+
+#     with gr.Row():
+#         input1 = gr.inputs.File(label="Upload a TIFF image", type = 'file', optional=False)
+
+#     submit_btn = gr.Button("Reconstruct")
+
+#     with gr.Row():
+#         output1 = gr.outputs.Image(label="Wide-field projection")
+#         output2 = gr.outputs.Image(label="SIM Reconstruction")
+
+#     output3 = gr.File(label="Download SR image", visible=False)
+
+#     submit_btn.click(
+#         predict,
+#         input1,
+#         [output1, output2, output3]
+#     )
+
+#     gr.Examples(examples, input1, [output1, output2, output3])
+
+
+# interface.launch()
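
Because the new `predict` only needs `imagefile.name` (the path of the temporary file that Gradio's File input passes in), the pipeline can also be exercised without the web UI. A hypothetical local smoke test, assuming the names pulled in by `from NNfunctions import *` can be imported directly:

```python
# Hypothetical smoke test for the new three-output pipeline; not part of this commit.
import skimage.io
from NNfunctions import GetOptions_allRnd_0317, LoadModel, EvaluateModel

opt = GetOptions_allRnd_0317()
net = LoadModel(opt)

img = skimage.io.imread('TestImages/SLM-SIM_beads.tif')   # one of the bundled SIM stacks
sr_path, wf_path, dl_path = EvaluateModel(net, opt, img)  # paths of the three saved PNGs
print('wide-field:', wf_path)
print('reconstruction:', sr_path)
print('download (no LUT):', dl_path)
```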