''' ----------------------------------------
* Creation Time : Sun Aug 28 21:38:58 2022
* Last Modified : Sun Aug 28 21:41:36 2022
* Author : Charles N. Christensen
* Github : github.com/charlesnchr
----------------------------------------'''

import os
import glob

import gradio as gr
import numpy as np
import skimage.io

from NNfunctions import *

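# Model options and the pretrained network are provided by NNfunctions (wildcard import above)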
opt = GetOptions_allRnd_0317()
net = LoadModel(opt)

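# Close any Gradio interfaces still running in this process before launching a new one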
gr.close_all()

def predict(imagefile):
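    """Reconstruct an uploaded SIM stack.

    Reads the TIFF from the temporary upload, runs it through the loaded
    network and returns the wide-field projection, the ML-SIM reconstruction
    and a file path for downloading the super-resolved image.
    """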
    # img = np.array(skimage.io.imread(imagefile.name))
    # img = np.concatenate((img,img,img),axis=2)
    # img = np.transpose(img, (2,0,1))

    img = skimage.io.imread(imagefile.name)

    # sr,wf,out = EvaluateModel(net,opt,img,outfile)
    sr, wf, sr_download = EvaluateModel(net,opt,img)

    return wf, sr, sr_download

def process_example(filename):
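    """Map an example thumbnail (.png) to its TIFF stack in TestImages/ and reconstruct it."""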
    basename = os.path.basename(filename)
    basename = basename.replace('.png','.tif')
    img = skimage.io.imread('TestImages/%s' % basename)

    sr, wf, sr_download = EvaluateModel(net,opt,img)

    return wf, sr

title = '<h1 style="text-align: center;">ML-SIM: Reconstruction of SIM images with deep learning</h1>'

description = """
This space demonstrates the ML-SIM method for reconstruction of structured illumination microscopy images.

### <a href="https://opg.optica.org/boe/viewmedia.cfm?uri=boe-12-5-2720&html=true" target='_blank' > ML-SIM: universal reconstruction of structured illumination microscopy images using transfer learning </a>

_Charles N. Christensen<sup>1,2,*</sup>, Edward N. Ward<sup>1</sup>, Meng Lu<sup>1</sup>, Pietro Lio<sup>2</sup>, Clemens F. Kaminski_</br></br>
<sup>1</sup>University of Cambridge, Department of Chemical Engineering and Biotechnology, Laser Analytics Group</br>
<sup>2</sup>University of Cambridge, Department of Computer Science and Technology, Artificial Intelligence Group</br>
- GitHub: [charlesnchr](http://github.com/charlesnchr)
- Email: charles.n.chr@gmail.com
- Publication: <a href='https://opg.optica.org/boe/viewmedia.cfm?uri=boe-12-5-2720&html=true'
target='_blank'>Journal</a>, <a href='https://arxiv.org/abs/2003.11064' target='_blank'>Preprint</a>
---

## 🔬 To run ML-SIM
Upload a TIFF image and hit Submit, or select one from the examples below. Note that the model here is trained for SIM stacks of 9 frames (a 3x3 configuration, i.e. 3 phase steps for each of 3 orientations).
"""

article = """
---
Select an example from the list above to try the model on a test image. Below you can see what the file names correspond to.

The ML-SIM test images are from two different microscopes (MAI-SIM and SLM-SIM) in addition to two simulated images.

Wide-field projections in pseudo-colour of these examples are shown below:

![Example test images](https://i.imgur.com/AUrp1Jr.jpeg "Example test image for ML-SIM")

---
### Read more
- <a href='https://ML-SIM.com' target='_blank'>ML-SIM.com</a>
- <a href='https://charles-christensen.com' target='_blank'>Website</a>
- <a href='https://github.com/charlesnchr/ML-SIM' target='_blank'>GitHub</a>
- <a href='https://twitter.com/charlesnchr' target='_blank'>Twitter</a>
"""

# inputs = gr.inputs.Image(label="Upload a TIFF image", type = 'pil', optional=False)

inputs = gr.inputs.File(label="Upload a TIFF image", type='file', optional=False)
outputs = [
    gr.outputs.Image(label="INPUT (Wide-field projection)"),
    gr.outputs.Image(label="OUTPUT (ML-SIM)"),
    gr.outputs.File(label="Download SR image")
    # , gr.outputs.Textbox(type="auto",label="Pet Prediction")
]
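# The order of `outputs` matches the tuple returned by predict(): wide-field projection, ML-SIM reconstruction, download file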

examples = glob.glob('*.tif')

interface = gr.Interface(
    fn=predict,
    inputs=inputs,
    outputs=outputs,
    title=title,
    description=description,
    article=article,
    examples=examples,
    allow_flagging='never',
    cache_examples=False,
)
interface.launch()
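# A minimal variation, assuming the default launch settings do not suit the deployment:
# interface.launch(share=True)  # share=True serves a temporary public URL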


# with gr.Blocks() as interface:
#     gr.Markdown(title)
#     gr.Markdown(description)

#     with gr.Row():
#         input1 = gr.inputs.File(label="Upload a TIFF image", type = 'file', optional=False)

#     submit_btn = gr.Button("Reconstruct")

#     with gr.Row():
#         output1 = gr.outputs.Image(label="Wide-field projection")
#         output2 = gr.outputs.Image(label="SIM Reconstruction")

#     output3 = gr.File(label="Download SR image", visible=False)

#     submit_btn.click(
#         predict,
#         input1,
#         [output1, output2, output3]
#     )

#     gr.Examples(examples, input1, [output1, output2, output3])


# interface.launch()