Loren committed on
Commit 8df23b2 · verified · 1 Parent(s): e785918

Upload 3 files

Files changed (3)
  1. app.py +880 -0
  2. img_demo_enhance.png +0 -0
  3. requirements.txt +7 -0
app.py ADDED
@@ -0,0 +1,880 @@
+ import streamlit as st
+ import cv2
+ import imutils
+ from paddleocr import PaddleOCR, draw_ocr
+ from PIL import Image
+ import io
+ import os
+ import numpy as np
+ import ast
+ import operator
+ import matplotlib.pyplot as plt
+
+ st.set_page_config(
+     page_title='OpenCV Image processing', layout="wide",
+     initial_sidebar_state="expanded",
+ )
+ os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
+
+ st.markdown("""
+ <style>
+ .block-container {
+     padding-top: 1rem;
+     padding-bottom: 1rem;
+     padding-left: 1rem;
+     padding-right: 2rem;
+ }
+ </style>
+ """, unsafe_allow_html=True)
+
+ ###################################################################################################
+ ## INITIALISATIONS
+ ###################################################################################################
+ ###
+ @st.cache_data(show_spinner=True)
+ def initializations():
+     print("Initializations ...")
+     out_dict_lang_ppocr = {'Abaza': 'abq', 'Adyghe': 'ady', 'Afrikaans': 'af', 'Albanian': 'sq',
+         'Angika': 'ang', 'Arabic': 'ar', 'Avar': 'ava', 'Azerbaijani': 'az', 'Belarusian': 'be',
+         'Bhojpuri': 'bho', 'Bihari': 'bh', 'Bosnian': 'bs', 'Bulgarian': 'bg', 'Chinese & English': 'ch',
+         'Chinese Traditional': 'chinese_cht', 'Croatian': 'hr', 'Czech': 'cs', 'Danish': 'da',
+         'Dargwa': 'dar', 'Dutch': 'nl', 'English': 'en', 'Estonian': 'et', 'French': 'fr',
+         'German': 'german', 'Goan Konkani': 'gom', 'Hindi': 'hi', 'Hungarian': 'hu', 'Icelandic': 'is',
+         'Indonesian': 'id', 'Ingush': 'inh', 'Irish': 'ga', 'Italian': 'it', 'Japan': 'japan',
+         'Kabardian': 'kbd', 'Korean': 'korean', 'Kurdish': 'ku', 'Lak': 'lbe', 'Latvian': 'lv',
+         'Lezghian': 'lez', 'Lithuanian': 'lt', 'Magahi': 'mah', 'Maithili': 'mai', 'Malay': 'ms',
+         'Maltese': 'mt', 'Maori': 'mi', 'Marathi': 'mr', 'Mongolian': 'mn', 'Nagpur': 'sck',
+         'Nepali': 'ne', 'Newari': 'new', 'Norwegian': 'no', 'Occitan': 'oc', 'Persian': 'fa',
+         'Polish': 'pl', 'Portuguese': 'pt', 'Romanian': 'ro', 'Russia': 'ru', 'Saudi Arabia': 'sa',
+         'Serbian(cyrillic)': 'rs_cyrillic', 'Serbian(latin)': 'rs_latin', 'Slovak': 'sk',
+         'Slovenian': 'sl', 'Spanish': 'es', 'Swahili': 'sw', 'Swedish': 'sv', 'Tabassaran': 'tab',
+         'Tagalog': 'tl', 'Tamil': 'ta', 'Telugu': 'te', 'Turkish': 'tr', 'Ukrainian': 'uk',
+         'Urdu': 'ur', 'Uyghur': 'ug', 'Uzbek': 'uz', 'Vietnamese': 'vi', 'Welsh': 'cy'}
+
+     out_dict_interpolation = {"INTER_LINEAR": cv2.INTER_LINEAR,
+                               "INTER_NEAREST": cv2.INTER_NEAREST,
+                               # "INTER_LINEAR_EXACT": cv2.INTER_LINEAR_EXACT,
+                               "INTER_AREA": cv2.INTER_AREA,
+                               "INTER_CUBIC": cv2.INTER_CUBIC,
+                               "INTER_LANCZOS4": cv2.INTER_LANCZOS4,
+                               # "INTER_NEAREST_EXACT": cv2.INTER_NEAREST_EXACT,
+                               # "INTER_MAX": cv2.INTER_MAX,
+                               # "WARP_FILL_OUTLIERS": cv2.WARP_FILL_OUTLIERS,
+                               # "WARP_INVERSE_MAP": cv2.WARP_INVERSE_MAP,
+                              }
+
+     out_dict_thresholding_type = {"THRESH_BINARY": cv2.THRESH_BINARY,
+                                   "THRESH_BINARY_INV": cv2.THRESH_BINARY_INV,
+                                   "THRESH_TRUNC": cv2.THRESH_TRUNC,
+                                   "THRESH_TOZERO": cv2.THRESH_TOZERO,
+                                  }
+
+     out_dict_adaptative_method = {"ADAPTIVE_THRESH_MEAN_C": cv2.ADAPTIVE_THRESH_MEAN_C,
+                                   "ADAPTIVE_THRESH_GAUSSIAN_C": cv2.ADAPTIVE_THRESH_GAUSSIAN_C}
+
+     return out_dict_lang_ppocr, out_dict_interpolation, out_dict_thresholding_type, out_dict_adaptative_method
+
+ ###################################################################################################
+ ## FUNCTIONS
+ ###################################################################################################
+ ###
+ @st.cache_data(show_spinner=False)
+ def load_image(in_image_file):
+     """Load input file and open it
+     Args:
+         in_image_file (string or Streamlit UploadedFile): image to consider
+     Returns:
+         matrix: input file opened with OpenCV
+         string: path of the temporary copy written to disk
+     """
+     #if isinstance(in_image_file, str):
+     #    out_image_path = "img."+in_image_file.split('.')[-1]
+     #else:
+     #    out_image_path = "img."+in_image_file.name.split('.')[-1]
+     if isinstance(in_image_file, str):
+         out_image_path = "tmp_"+in_image_file
+     else:
+         out_image_path = "tmp_"+in_image_file.name
+     img = Image.open(in_image_file)
+     img.save(out_image_path)
+     # Read image back with OpenCV (BGR) and convert to RGB
+     # out_image_orig = Image.open(out_image_path)
+     out_image_cv2 = cv2.cvtColor(cv2.imread(out_image_path), cv2.COLOR_BGR2RGB)
+
+     # Reset the processing pipeline state for the new image
+     st.session_state.resize = False
+     st.session_state.scaling_factor = None
+     st.session_state.interpolation = None
+     st.session_state.rotate = None
+     st.session_state.angle = None
+     st.session_state.convolution = None
+     st.session_state.text_convol = None
+     st.session_state.convol_kernel = None
+     st.session_state.averaging = None
+     st.session_state.averaging_kernel_size = None
+     st.session_state.gaussian_bluring = None
+     st.session_state.gb_kernel_size = None
+     st.session_state.sigmaX = None
+     st.session_state.sigmaY = None
+     st.session_state.median_bluring = None
+     st.session_state.mb_kernel_size = None
+     st.session_state.bilateral_filtering = None
+     st.session_state.d = None
+     st.session_state.sigma_color = None
+     st.session_state.sigma_space = None
+     st.session_state.erosion = None
+     st.session_state.erosion_kernel_size = None
+     st.session_state.nb_iter_erosion = None
+     st.session_state.dilation = None
+     st.session_state.dilation_kernel_size = None
+     st.session_state.nb_iter_dilation = None
+     st.session_state.binarization = None
+     st.session_state.bin_thresh = None
+     st.session_state.bin_value = None
+     st.session_state.bin_thresholding_type = None
+     st.session_state.bin_otsu = None
+     st.session_state.thresh_typ = None
+     st.session_state.adaptative_thresh = None
+     st.session_state.at_thresholding_type = None
+     st.session_state.at_max_value = None
+     st.session_state.at_adaptative_method = None
+     st.session_state.at_block_size = None
+     st.session_state.at_const = None
+     st.session_state.processed_image = None
+
+     return out_image_cv2, out_image_path
+ ###
+ def eval_expr(expr):
+     """Safely evaluate a numeric expression
+     Args:
+         expr (string): numeric expression
+     Returns:
+         float: evaluation result (1.0 if the expression cannot be parsed)
+     """
+     result = 1.
+     # Dictionary of authorized operators
+     operators = {
+         ast.Add: operator.add,
+         ast.Sub: operator.sub,
+         ast.Mult: operator.mul,
+         ast.Div: operator.truediv,
+         ast.Pow: operator.pow,
+         ast.USub: operator.neg,
+     }
+     def _eval(node):
+         if isinstance(node, ast.Expression):
+             return _eval(node.body)
+         elif isinstance(node, ast.Constant):  # number
+             return node.value
+         elif isinstance(node, ast.BinOp):  # binary operations
+             return operators[type(node.op)](_eval(node.left), _eval(node.right))
+         elif isinstance(node, ast.UnaryOp):  # unary operations (-n)
+             return operators[type(node.op)](_eval(node.operand))
+         else:
+             raise TypeError(node)
+     try:
+         parsed = ast.parse(expr, mode='eval')
+         result = _eval(parsed.body)
+     except Exception:
+         pass
+     return result
+ ###
+ def text_kernel_to_latex(text_eval):
+     """Try to parse a kernel text description like: 1/6 * [[1,1],[1,1]]
+     Args:
+         text_eval (string): the string with the kernel expression
+     Returns:
+         string: left part of the input string, before *
+         list: right part of the input string, after *
+         string: LaTeX expression corresponding to the text kernel input
+     """
+     list_eval = text_eval.split('*')
+     text_kernel = list_eval[-1].strip()
+     list_kernel = ast.literal_eval(text_kernel)
+     latex = "\\begin{bmatrix}\n"
+     for row in list_kernel:
+         latex += " & ".join(map(str, row)) + " \\\\\n"
+     latex += "\\end{bmatrix}"
+     text_coeff = "1."  # keep it a string so eval_expr() can parse it
+     latex_text = latex
+     if len(list_eval) > 1:
+         text_coeff = list_eval[0].strip()
+         latex_text = text_coeff + ' ' + latex
+     return text_coeff, list_kernel, latex_text
+ ###
+ def get_img_fig(img):
+     """Plot the image with Matplotlib, so that the axis ticks show the image size
+     Args:
+         img (Image): image to show
+     Returns:
+         Matplotlib figure
+     """
+     fig = plt.figure()
+     if len(img.shape) == 3:
+         plt.imshow(img, cmap=None)
+     else:
+         plt.imshow(img, cmap='gray')
+     return fig
+
+ @st.fragment
+ def show_latex(latex_code):
+     st.latex(latex_code)
+ ###################################################################################################
+ ## STREAMLIT APP
+ ###################################################################################################
+ st.title(''':orange[Image processing with OpenCV]''')
+ st.write("")
+ st.write("")
+ st.write("")
+ st.set_option("client.showErrorDetails", False)
+
+ dict_lang_ppocr, dict_interpolation, dict_thresholding_type, dict_adaptative_method = initializations()
+
+ cols = st.columns([0.25, 0.25, 0.5])
+ cols[0].markdown("#### :orange[Choose picture:]")
+ img_typ = cols[0].radio("#### :orange[Choose picture type:]",
+                         ['Upload file', 'Take a picture', 'Use a demo file'], index=0)
+ if img_typ == 'Upload file':
+     image_file = cols[1].file_uploader("Upload a file:", type=["png", "jpg", "jpeg"])
+
+ if img_typ == 'Take a picture':
+     image_file = cols[1].camera_input("Take a picture:")
+ if img_typ == 'Use a demo file':
+     image_file = 'img_demo_enhance.png'
+
+ ##----------- Process input image -----------------------------------------------------------------
+ if image_file is not None:
+     img_cv2, image_path = load_image(image_file)
+
+     cols[2].markdown('#### :orange[Original image]')
+     cnt_img_ori = cols[2].container(height=300, border=False)
+     #cnt_img_ori.image(img_cv2) #, use_container_width=True)
+     cnt_img_ori.pyplot(get_img_fig(img_cv2))
+     col1, col2 = st.columns([0.5, 0.5]) #gap="medium")
+
+     col1.markdown('#### :orange[Processed image]')
+     list_op = []
+
+     if col1.checkbox("Grayscale"):
+         try:
+             # img_cv2 is RGB (converted at load time), so use COLOR_RGB2GRAY
+             img_first = cv2.cvtColor(img_cv2.copy(), cv2.COLOR_RGB2GRAY)
+             list_op.append("Grayscale")
+         except Exception as e:
+             st.exception(e)
+     else:
+         img_first = img_cv2.copy()
+
+     if col1.checkbox("Bit-wise inversion"):
+         try:
+             img_first = cv2.bitwise_not(img_first)
+             list_op.append("Bit-wise inversion")
+         except Exception as e:
+             st.exception(e)
+
+     # Processed image construction
+     cnt_img_wrk = col1.container(height=500, border=False)
+     img_processed = cnt_img_wrk.empty()
+     img_wrk = img_first.copy()
+
+     if st.session_state.resize:
+         try:
+             img_wrk = cv2.resize(img_wrk, None, fx=st.session_state.scaling_factor,
+                                  fy=st.session_state.scaling_factor,
+                                  interpolation=dict_interpolation[st.session_state.interpolation])
+             list_op.append("Resize - fx="+str(st.session_state.scaling_factor)+", fy="+
+                            str(st.session_state.scaling_factor)+", interpolation="+
+                            st.session_state.interpolation)
+         except Exception as e:
+             st.exception(e)
+
+     if st.session_state.rotate:
+         try:
+             img_wrk = imutils.rotate(img_wrk, angle=st.session_state.angle)
+             list_op.append("Rotate - angle="+str(st.session_state.angle))
+         except Exception as e:
+             st.exception(e)
+
+     if st.session_state.convolution:
+         try:
+             img_wrk = cv2.filter2D(src=img_wrk, ddepth=-1, kernel=st.session_state.convol_kernel)
+             list_op.append("Filtering - Custom 2D Convolution - kernel="+st.session_state.text_convol)
+         except Exception as e:
+             st.exception(e)
+
+     if st.session_state.averaging:
+         try:
+             img_wrk = cv2.blur(src=img_wrk, ksize=st.session_state.averaging_kernel_size)
+             list_op.append("Filtering - Averaging - kernel_size="+
+                            str(st.session_state.averaging_kernel_size))
+         except Exception as e:
+             st.exception(e)
+
+     if st.session_state.gaussian_bluring:
+         try:
+             img_wrk = cv2.GaussianBlur(src=img_wrk, ksize=st.session_state.gb_kernel_size,
+                                        sigmaX=st.session_state.sigmaX, sigmaY=st.session_state.sigmaY)
+             list_op.append("Filtering - Gaussian Blurring - ksize="+
+                            str(st.session_state.gb_kernel_size)+", sigmaX="+
+                            str(st.session_state.sigmaX)+", sigmaY="+str(st.session_state.sigmaY))
+         except Exception as e:
+             st.exception(e)
+
+     if st.session_state.median_bluring:
+         try:
+             img_wrk = cv2.medianBlur(img_wrk, st.session_state.mb_kernel_size)
+             list_op.append("Filtering - Median Blurring - kernel_size="+
+                            str(st.session_state.mb_kernel_size))
+         except Exception as e:
+             st.exception(e)
+
+     if st.session_state.bilateral_filtering:
+         try:
+             img_wrk = cv2.bilateralFilter(img_wrk, st.session_state.d, st.session_state.sigma_color,
+                                           st.session_state.sigma_space)
+             list_op.append("Filtering - Bilateral Filtering - d="+str(st.session_state.d)+
+                            ", sigma_color="+str(st.session_state.sigma_color)+
+                            ", sigma_space="+str(st.session_state.sigma_space))
+         except Exception as e:
+             st.exception(e)
+
+     if st.session_state.erosion:
+         try:
+             kernel = np.ones((st.session_state.erosion_kernel_size,
+                               st.session_state.erosion_kernel_size),
+                              np.uint8)
+             img_wrk = cv2.erode(img_wrk, kernel, iterations=st.session_state.nb_iter_erosion)
+             list_op.append("Erosion - kernel_size="+str(st.session_state.erosion_kernel_size)+
+                            ", iterations="+str(st.session_state.nb_iter_erosion))
+         except Exception as e:
+             st.exception(e)
+
+     if st.session_state.dilation:
+         try:
+             kernel = np.ones((st.session_state.dilation_kernel_size,
+                               st.session_state.dilation_kernel_size),
+                              np.uint8)
+             img_wrk = cv2.dilate(img_wrk, kernel, iterations=st.session_state.nb_iter_dilation)
+             list_op.append("Dilation - kernel_size="+str(st.session_state.dilation_kernel_size)+
+                            ", iterations="+str(st.session_state.nb_iter_dilation))
+         except Exception as e:
+             st.exception(e)
+
+     if st.session_state.binarization:
+         try:
+             ret, img_wrk = cv2.threshold(img_wrk, st.session_state.bin_thresh,
+                                          st.session_state.bin_value,
+                                          st.session_state.thresh_typ)
+             list_op.append("Thresholding - thresh="+str(st.session_state.bin_thresh)+
+                            ", maxval="+str(st.session_state.bin_value)+", type="+
+                            st.session_state.bin_thresholding_type+", otsu="+
+                            str(st.session_state.bin_otsu))
+         except Exception as e:
+             st.exception(e)
+
+     if st.session_state.adaptative_thresh:
+         try:
+             img_wrk = cv2.adaptiveThreshold(img_wrk, st.session_state.at_max_value,
+                                             dict_adaptative_method[st.session_state.at_adaptative_method],
+                                             dict_thresholding_type[st.session_state.at_thresholding_type],
+                                             st.session_state.at_block_size, st.session_state.at_const)
+             list_op.append("Adaptive thresholding - maxValue="+
+                            str(st.session_state.at_max_value)+", adaptiveMethod="+
+                            st.session_state.at_adaptative_method+
+                            ", thresholdType="+st.session_state.at_thresholding_type+
+                            ", blockSize="+str(st.session_state.at_block_size)+", C="+
+                            str(st.session_state.at_const))
+         except Exception as e:
+             st.exception(e)
+
+     # Show image
+     img_processed.pyplot(get_img_fig(img_wrk))
+     st.session_state.processed_image = img_wrk
+
+     # Process
+     col2.markdown('#### :orange[Check & enhance]')
+
+     with col2.expander(":blue[Image processing]", expanded=False):
+         tab1, tab2, tab3, tab4, tab5 = \
+             st.tabs(["Resize", "Rotate", "Filtering",
+                      "Morphology", "Thresholding"])
+         with tab1:  # Resize
+             with tab1.form("Resize parameters"):
+                 st.session_state.scaling_factor = st.slider("Scaling factor:", 0.1, 20., 1., 0.1)
+                 cols_tab1 = st.columns([0.1, 0.9], gap="medium", vertical_alignment="center")
+                 cols_tab1[0].markdown("💬", help="""An interpolation function's goal is
+ to examine neighborhoods of pixels and use these neighborhoods to optically increase or decrease
+ the size of the image without introducing distortions (or at least as few distortions
+ as possible).\n
+ ```cv2.INTER_LINEAR``` This option uses the bilinear interpolation algorithm. Unlike INTER_NEAREST,
+ it interpolates in two dimensions and predicts the function used to calculate the color
+ of a pixel. This algorithm is effective in handling visual distortions while zooming or
+ enlarging an image.\n
+ ```cv2.INTER_NEAREST``` This option uses the nearest-neighbor interpolation algorithm. It retains
+ the sharpness of the edges, though the overall image may be blurred.\n
+ ```cv2.INTER_LINEAR_EXACT``` is a modification of ```INTER_LINEAR```; both use the bilinear
+ interpolation algorithm. The only difference is that the calculations in ```INTER_LINEAR_EXACT```
+ are bit-exact.\n
+ ```cv2.INTER_AREA``` This option uses resampling by pixel-area relation. While enlarging
+ images, INTER_AREA works the same as INTER_NEAREST. In other cases, ```INTER_AREA``` works better for
+ image decimation, avoiding false inference patterns in images (moiré patterns).\n
+ ```cv2.INTER_CUBIC``` This option uses the bicubic interpolation technique, an extension of cubic
+ interpolation used on 2-dimensional regular grids.\n
+ ```cv2.INTER_LANCZOS4``` This option uses Lanczos interpolation over an 8 x 8 pixel neighborhood.
+ It uses Fourier series and Chebyshev polynomials and is suited to images with a large number of
+ small details.\n
+ ```cv2.INTER_NEAREST_EXACT``` is a modification of INTER_NEAREST with bit-level accuracy.\n
+ ```cv2.INTER_MAX``` This option uses a mask for interpolation codes.\n
+ ```cv2.WARP_FILL_OUTLIERS``` This interpolation technique skips the outliers during interpolation calculations.\n
+ ```cv2.WARP_INVERSE_MAP``` This option uses the inverse transformation technique for interpolation.\n""")
+                 cols_tab1[0].link_button("📚", "https://opencv.org/blog/resizing-and-rescaling-images-with-opencv/#h-resizing-with-different-interpolation-methods")
+                 st.session_state.interpolation = cols_tab1[1].selectbox("Interpolation method:",
+                                                                         list(dict_interpolation.keys()))
+                 c1, c2 = st.columns(2)
+                 apply_tab1 = c1.toggle("Apply", help="Click here to indicate whether the operation should be carried out or not, then validate with Confirm.", key=1)
+                 with c2:
+                     submit_tab1 = st.form_submit_button(":green[Confirm]")
+
+                 if submit_tab1:
+                     st.session_state.resize = apply_tab1
+                     st.rerun()
+
+         with tab2:  # Rotate
+             with tab2.form("Rotate parameters"):
+                 st.session_state.angle = st.slider("Angle:", 0, 360, 0, step=10)
+                 c1, c2 = st.columns(2)
+                 apply_tab2 = c1.toggle("Apply", help="Click here to indicate whether the operation should be carried out or not, then validate with Confirm.", key=2)
+                 with c2:
+                     submit_tab2 = st.form_submit_button(":green[Confirm]")
+
+                 if submit_tab2:
+                     st.session_state.rotate = apply_tab2
+                     st.rerun()
+
+         with tab3:  # Filtering
+             st.write("📚 :blue[*More about image filtering*] 👉 \
+ [here](https://learnopencv.com/image-filtering-using-convolution-in-opencv/)")
+             selection = st.segmented_control("Filtering type",
+                                              ["Custom 2D Convolution", "Blurring"],
+                                              selection_mode="single")
+             match selection:
+                 case "Custom 2D Convolution":
+                     with st.form("tab3_1"):
+                         st.write("📚 :blue[*More about convolution matrices*] 👉 \
+ [here](https://en.wikipedia.org/wiki/Kernel_(image_processing))")
+                         text_convol = st.text_input("Write your custom kernel here (example: 1/9 * [[1,1,1], [1,1,1], [1,1,1]]):",
+                                                     value=None)
+                         kernel = None
+                         if text_convol is not None:
+                             try:
+                                 text_coeff, list_kernel, latex_code = text_kernel_to_latex(text_convol)
+                                 coeff = eval_expr(text_coeff)
+                                 kernel = coeff * np.array(list_kernel)
+                                 show_latex(latex_code)
+                             except Exception as e:
+                                 st.exception(e)
+                                 text_convol = None
+                         else:
+                             text_coeff, list_kernel, latex_code = \
+                                 text_kernel_to_latex("1/9 * [[1,1,1], [1,1,1], [1,1,1]]")
+                             show_latex(latex_code)
+
+                         c1, c2 = st.columns(2)
+                         apply_tab31 = c1.toggle("Apply", help="Click here to indicate whether the operation should be carried out or not, then validate with Confirm.", key=3)
+                         with c2:
+                             submit_tab31 = st.form_submit_button(":green[Confirm]")
+
+                         if submit_tab31:
+                             st.session_state.convolution = apply_tab31
+                             st.session_state.text_convol = text_convol
+                             st.session_state.convol_kernel = kernel
+                             st.rerun()
+
+                 case "Blurring":
+                     st.write("📚 :blue[*More about blurring techniques*] 👉 \
+ [here](https://docs.opencv.org/4.x/d4/d13/tutorial_py_filtering.html)")
+                     b1, b2, b3, b4 = st.tabs(["Averaging", "Gaussian Blurring", "Median Blurring",
+                                               "Bilateral Filtering"])
+                     # typ_blurring = st.segmented_control("Blurring type",
+                     #                     ["Averaging", "Gaussian Blurring", "Median Blurring",
+                     #                      "Bilateral Filtering"],
+                     #                     selection_mode="multi")
+
+                     with b1:
+                         with st.form("tab_32a"):
+                             st.markdown("💬 :green[Averaging?]",
+                                         help="This is done by convolving an image with a normalized box filter. \
+ It simply takes the average of all the pixels under the kernel \
+ area and replaces the central element."
+                                         )
+                             kernel_width = st.slider("Kernel size width:", 2, 20, None, 1)
+                             kernel_height = st.slider("Kernel size height:", 2, 20, None, 1)
+
+                             c1, c2 = st.columns(2)
+                             apply_tab32a = c1.toggle("Apply", help="Click here to indicate whether the operation should be carried out or not, then validate with Confirm.", key=4)
+                             with c2:
+                                 submit_tab32a = st.form_submit_button(":green[Confirm]")
+
+                             if submit_tab32a:
+                                 st.session_state.averaging = apply_tab32a
+                                 st.session_state.averaging_kernel_size = (kernel_width, kernel_height)
+                                 st.rerun()
+
+                     with b2:
+                         with st.form("tab_32b"):
+                             st.markdown("💬 :green[Gaussian Blurring?]",
+                                         help="In this method, instead of a box filter, a Gaussian kernel is used. \
+ We should specify the width and height of the kernel, which should be positive and odd. \
+ We also should specify the standard deviations in the X and Y directions, `sigmaX` and `sigmaY` respectively. \
+ If only `sigmaX` is specified, `sigmaY` is taken to be the same as `sigmaX`. If both are given as zeros, they are \
+ calculated from the kernel size.\n \
+ Gaussian blurring is highly effective in removing Gaussian noise from an image.")
+                             # GaussianBlur requires odd kernel dimensions, hence the step of 2
+                             kernel_width = st.slider("Kernel size width:", 3, 21, None, 2)
+                             kernel_height = st.slider("Kernel size height:", 3, 21, None, 2)
+                             st.markdown("Standard deviations of the Gaussian kernel:",
+                                         help="""The parameters `sigmaX` and `sigmaY` represent the standard deviations
+ of the Gaussian kernel in the horizontal (X) and vertical (Y) directions,
+ respectively. These values control the extent of blurring applied to the image.\n
+ **Typical Values for sigmaX and sigmaY:**
+ - Low values (e.g., 1–3): Apply a mild blur, useful for slight noise reduction while preserving image details.
+ - Moderate values (e.g., 5–10): Produce a more noticeable blur, helpful for reducing more significant noise or smoothing out textures.
+ - High values (e.g., >10): Result in a strong blur, which can be used for artistic effects or to obscure details.
+ It's common practice to set sigmaX and sigmaY to 0. In this case, OpenCV calculates the standard deviations based on the kernel size (ksize).
+ If only sigmaX is specified and sigmaY is set to 0, OpenCV uses the same value for both directions.\n
+ **Recommendations:**
+ - Specify sigmaX and sigmaY explicitly: For precise control over the blurring effect, define both parameters based on the desired outcome.
+ - Use sigmaX = 0 and sigmaY = 0: To allow OpenCV to compute the standard deviations automatically from the kernel size.
+ - Choose an appropriate kernel size: The ksize parameter should be a tuple of positive odd integers (e.g., (3, 3), (5, 5)).
+ """)
+                             sigmaX = st.slider("sigmaX:", 0, 20, 0, 1)
+                             sigmaY = st.slider("sigmaY:", 0, 20, 0, 1)
+
+                             c1, c2 = st.columns(2)
+                             apply_tab32b = c1.toggle("Apply", help="Click here to indicate whether the operation should be carried out or not, then validate with Confirm.", key=5)
+                             with c2:
+                                 submit_tab32b = st.form_submit_button(":green[Confirm]")
+
+                             if submit_tab32b:
+                                 st.session_state.gaussian_bluring = apply_tab32b
+                                 st.session_state.gb_kernel_size = (kernel_width, kernel_height)
+                                 st.session_state.sigmaX = sigmaX
+                                 st.session_state.sigmaY = sigmaY
+                                 st.rerun()
+
+                     with b3:
+                         with st.form("tab_32c"):
+                             st.markdown("💬 :green[Median Blurring?]",
+                                         help="It takes the median of all the pixels under the \
+ kernel area, and the central element is replaced with this median value. Interestingly, in the above \
+ filters, the central element is a newly calculated value which may be a pixel value in the image or a new value. \
+ But in median blurring, the central element is always replaced by some pixel value in the image. \
+ It reduces the noise effectively. Its kernel size should be a positive odd integer.\n \
+ Median blurring is highly effective against salt-and-pepper noise in an image.")
+                             kernel_size = st.slider("Kernel size:", 3, 15, None, 2, key=101)
+
+                             c1, c2 = st.columns(2)
+                             apply_tab32c = c1.toggle("Apply", help="Click here to indicate whether the operation should be carried out or not, then validate with Confirm.", key=6)
+                             with c2:
+                                 submit_tab32c = st.form_submit_button(":green[Confirm]")
+
+                             if submit_tab32c:
+                                 st.session_state.median_bluring = apply_tab32c
+                                 st.session_state.mb_kernel_size = kernel_size
+                                 st.rerun()
+
+                     with b4:
+                         with st.form("tab_32d"):
+                             st.markdown("💬 :green[Bilateral Filtering?]",
+                                         help="It is highly effective in noise removal while \
+ keeping edges sharp, but the operation is slower compared to other filters. We already saw that a \
+ Gaussian filter takes the neighbourhood around the pixel and finds its Gaussian weighted average. \
+ This Gaussian filter is a function of space alone, that is, nearby pixels are considered while \
+ filtering. It doesn't consider whether pixels have almost the same intensity, or \
+ whether a pixel is an edge pixel or not. So it blurs the edges too, which we don't want.\n \
+ Bilateral filtering also takes a Gaussian filter in space, plus one more Gaussian filter which is \
+ a function of pixel difference. \
+ The Gaussian function of space makes sure that only nearby pixels are considered for blurring, \
+ while the Gaussian function of intensity difference makes sure that only those pixels with similar \
+ intensities to the central pixel are considered for blurring. \
+ So it preserves the edges, since pixels at edges will have large intensity variation.")
+                             st.markdown("Diameter of each pixel neighborhood that is used during filtering:",
+                                         help="""**Effect:**\n
+ A larger `d` value means that more neighboring pixels are considered in the filtering process, leading to a more pronounced
+ blurring effect. Conversely, a smaller `d` focuses the filter on a tighter area, preserving more details.
+ **Automatic Calculation:**\n
+ If `d` is set to a non-positive value (e.g., 0 or negative), OpenCV automatically calculates it based on the sigmaSpace parameter.
+ Specifically, the radius is computed as `radius = cvRound(sigmaSpace * 1.5)`, and then `d = radius * 2 + 1` to ensure it's an odd
+ number. This ensures that the kernel has a central pixel.
+ **Typical Values for `d`:**\n
+ The choice of d depends on the desired balance between noise reduction and edge preservation:
+ - Small d (e.g., 5 to 9): Suitable for subtle smoothing while maintaining edge sharpness.
+ - Medium d (e.g., 9 to 15): Offers a balance between noise reduction and detail preservation.
+ - Large d (e.g., 15 and above): Provides stronger blurring, which may be useful for artistic effects but can lead to loss of
+ fine details.
+ **Recommendations:**\n
+ - Large filters (d > 5) are very slow, so it is recommended to use `d=5` for real-time applications, and perhaps
+ `d=9` for offline applications that need heavy noise filtering.
+ - Start with Moderate Values: Begin with `d=9`, `sigmaColor=75`, and `sigmaSpace=75` as a baseline. Adjust these values based on
+ the specific requirements of your application.
+ - Consider Image Size: For larger images, you might need to increase `d` to achieve a noticeable effect. Conversely,
+ for smaller images, a smaller `d` might suffice.
+ - Balance with `sigmaColor` and `sigmaSpace`: Ensure that `d` is appropriately balanced with `sigmaColor` and
+ `sigmaSpace`. An excessively large `sigmaSpace` with a small `d` might not utilize the full potential of the spatial filtering.
+ """)
+                             d_value = st.slider("d:", 3, 15, None, 2)
+                             st.markdown("`sigmaColor` and `sigmaSpace`:", help="""
+ `sigmaColor`: This parameter defines the filter sigma in the color space. A larger value means that pixels with more significant
+ color differences will be mixed together, resulting in areas of semi-equal color.
+ `sigmaSpace`: This parameter defines the filter sigma in the coordinate space. A larger value means that pixels farther apart
+ will influence each other as long as their colors are close enough.\n
+ These parameters work together to ensure that the filter smooths the image while preserving edges.
+ **Typical Values for `sigmaColor` and `sigmaSpace`:**\n
+ The choice of `sigmaColor` and `sigmaSpace` depends on the specific application and the desired effect.
+ However, some commonly used values are:
+ - `sigmaColor`: Values around 75 are often used for general smoothing while preserving edges.
+ - `sigmaSpace`: Similarly, values around 75 are typical for maintaining edge sharpness while reducing noise.
+ For example, applying the bilateral filter with `d=9`, `sigmaColor=75`, and `sigmaSpace=75` is a common practice.
+ **Recommendations:**\n
+ - Start with Equal Values: Setting `sigmaColor` and `sigmaSpace` to the same value (e.g., 75) is a good starting point.
+ - Adjust Based on Results: If the image appears too blurred, reduce the values. If noise is still present, increase them.
+ - Consider Image Characteristics: For images with high noise, higher values may be necessary. For images where edge preservation
+ is critical, lower values are preferable.""")
+                             sigma_color = st.slider("sigmaColor", 1, 255, None, 1)
+                             sigma_space = st.slider("sigmaSpace", 1, 255, None, 1)
+
+                             c1, c2 = st.columns(2)
+                             apply_tab32d = c1.toggle("Apply", help="Click here to indicate whether the operation should be carried out or not, then validate with Confirm.", key=7)
+                             with c2:
+                                 submit_tab32d = st.form_submit_button(":green[Confirm]")
+
+                             if submit_tab32d:
+                                 st.session_state.bilateral_filtering = apply_tab32d
+                                 st.session_state.d = d_value
+                                 st.session_state.sigma_color = sigma_color
+                                 st.session_state.sigma_space = sigma_space
+                                 st.rerun()
+
+         with tab4:  # Morphology
+             list_select = st.segmented_control("Morphological operation:",
+                                                ["Erosion", "Dilation"],
+                                                selection_mode="multi")
+             if "Erosion" in list_select:
+                 with st.form("tab_4a"):
+                     st.markdown("💬 :green[Erosion?]",
+                                 help="The basic idea of erosion is just like soil erosion: it erodes \
+ away the boundaries of the foreground object (always try to keep the foreground in white). \
+ How does it work? The kernel slides through the image (as in 2D convolution). A pixel in the \
+ original image (either 1 or 0) will be considered 1 only if all the pixels under the kernel are 1; \
+ otherwise it is eroded (made zero). \n \
+ So all the pixels near a boundary are discarded, depending upon the size of the kernel: the \
+ thickness or size of the foreground object decreases, or simply, the white region of the image \
+ shrinks. \n\
+ It is useful for removing small white noise, detaching two connected objects, etc. \n \
+ :orange[**Best practice:** convert to grayscale before applying erosion.]")
+                     kernel_size_ero = st.slider("Kernel size:", 3, 21, 3, 2, key=102)
+                     nb_iter = st.slider('Number of iterations:', 1, 7, 1, 1, key=201)
+
+                     c1, c2 = st.columns(2)
+                     apply_tab4a = c1.toggle("Apply", help="Click here to indicate whether the operation should be carried out or not, then validate with Confirm.", key=8)
+                     with c2:
+                         submit_tab4a = st.form_submit_button(":green[Confirm]")
+
+                     if submit_tab4a:
+                         st.session_state.erosion = apply_tab4a
+                         st.session_state.erosion_kernel_size = kernel_size_ero
+                         st.session_state.nb_iter_erosion = nb_iter
+                         st.rerun()
+
+             if "Dilation" in list_select:
+                 with st.form("tab_4b"):
+                     st.markdown("💬 :green[Dilation?]",
+                                 help="The opposite of an erosion is a dilation. Just as an \
+ erosion will eat away at the foreground pixels, a dilation will grow the foreground pixels. \
+ Dilations increase the size of foreground objects and are especially useful for joining broken \
+ parts of an image together. Dilations, just as erosions, also utilize structuring elements: \
+ a center pixel p of the structuring element is set to white if ANY pixel in the structuring \
+ element is > 0. \n \
+ :orange[**Best practice:** convert to grayscale before applying dilation.]")
+                     kernel_size_dil = st.slider("Kernel size:", 3, 21, 3, 2, key=103)
+                     nb_iter = st.slider('Number of iterations:', 1, 7, 1, 1, key=202)
+
+                     c1, c2 = st.columns(2)
+                     apply_tab4b = c1.toggle("Apply", help="Click here to indicate whether the operation should be carried out or not, then validate with Confirm.", key=9)
+                     with c2:
+                         submit_tab4b = st.form_submit_button(":green[Confirm]")
+
+                     if submit_tab4b:
+                         st.session_state.dilation = apply_tab4b
+                         st.session_state.dilation_kernel_size = kernel_size_dil
+                         st.session_state.nb_iter_dilation = nb_iter
+                         st.rerun()
+
+         with tab5:  # Thresholding
+             selection = st.segmented_control("Type:", ["Binarization", "Adaptive thresholding"])
+             match selection:
+                 case "Binarization":
+                     with st.form("tab5_a"):
+                         st.markdown("💬 :green[What is thresholding?]",
+                                     help='''Thresholding is the binarization of an image. In general, we seek to
+ convert a grayscale image to a binary image, where the pixels are either
+ 0 or 255.
+ A simple thresholding example would be selecting a threshold value T,
+ and then setting all pixel intensities less than T to 0, and all pixel
+ values greater than T to 255. In this way, we are able to create a binary
+ representation of the image.''')
+                         st.markdown("*:orange[⚠ Image must be in grayscale]*")
+                         cols_tab1 = st.columns([0.1, 0.9], gap="medium", vertical_alignment="center")
+                         with cols_tab1[1]:
+                             thresholding_type = cols_tab1[1].selectbox("Thresholding type:",
+                                                                        list(dict_thresholding_type.keys()))
+                         with cols_tab1[0].popover(":material/info:", help="Help on thresholding type",
+                                                   use_container_width=False):
+                             st.link_button("📚:blue[cf. OpenCV documentation:]",
+                                            "https://docs.opencv.org/3.0-beta/modules/imgproc/doc/miscellaneous_transformations.html#threshold")
+
+                         thresh = st.slider("Thresh:", 0, 255, 255, 1)
+                         # The selectbox returns keys like "THRESH_BINARY" (no "cv." prefix)
+                         if thresholding_type in ["THRESH_BINARY", "THRESH_BINARY_INV"]:
+                             value = st.slider("Value:", 0, 255, 255, 1)
+                         else:
+                             value = 255
+
+                         cols_tab3 = st.columns(2, gap="medium", vertical_alignment="center")
+                         otsu = cols_tab3[0].checkbox("Optimum Global Thresholding using Otsu's Method?",
+                                                      help='''Otsu's method tries to find a threshold value
+ which minimizes the weighted within-class variance.
+ Since variance measures the spread of the distribution
+ about the mean, minimizing the within-class
+ variance tends to make the classes compact.''')
+                         cols_tab3[1].link_button("📚:blue[Documentation]",
+                                                  "https://theailearner.com/2019/07/19/optimum-global-thresholding-using-otsus-method/")
+
+                         thresh_typ = dict_thresholding_type[thresholding_type]
+
+                         c1, c2 = st.columns(2)
+                         apply_tab5a = c1.toggle("Apply", help="Click here to indicate whether the operation should be carried out or not, then validate with Confirm.", key=10)
+                         with c2:
+                             submit_tab5a = st.form_submit_button(":green[Confirm]")
+
+                         if submit_tab5a:
+                             if otsu:
+                                 thresh_typ = thresh_typ + cv2.THRESH_OTSU
+                             st.session_state.binarization = apply_tab5a
+                             st.session_state.bin_thresh = thresh
+                             st.session_state.bin_value = value
+                             st.session_state.bin_thresholding_type = thresholding_type
+                             st.session_state.bin_otsu = otsu
+                             st.session_state.thresh_typ = thresh_typ
+                             st.rerun()
+
+                 case "Adaptive thresholding":
+                     with st.form("tab5_b"):
+                         st.markdown("💬 :green[What is adaptive thresholding?]",
+                                     help='''This is a useful technique when dealing with images having non-uniform illumination.
+ Here, the threshold value is calculated separately for each pixel using
+ some statistics obtained from its neighborhood. This way we get different thresholds
+ for different image regions, which tackles the problem of varying illumination.''')
+                         st.markdown("*:orange[⚠ Image must be in grayscale]*")
+                         thresholding_type = st.selectbox("Thresholding type:",
+                                                          list(dict_thresholding_type.keys())[:2])
+                         max_value = st.slider("Max value:", 0, 255, 255, 1,
+                                               help="""This is the value assigned to the pixels after thresholding.
+ This depends on the thresholding type. If the type is cv2.THRESH_BINARY,
+ all the pixels greater than the threshold are assigned this maxValue.""")
+                         adaptative_method = st.selectbox("Adaptive method:",
+                                                          list(dict_adaptative_method.keys()),
+                                                          help="""This tells us how the threshold is calculated from the pixel neighborhood.
+ Two methods are currently supported:
+ - cv2.ADAPTIVE_THRESH_MEAN_C: the threshold value is the mean of the neighborhood area.\n
+ - cv2.ADAPTIVE_THRESH_GAUSSIAN_C: the threshold value is the weighted sum of the
+ neighborhood area, using Gaussian weights computed with the getGaussianKernel() method.""")
+                         block_size = st.slider("Block size:", 3, 21, 3, 2,
+                                                help='''**🔍 What is blockSize?**\n
+ In adaptive thresholding, the threshold for each pixel is determined based on a local neighborhood around it.
+ The blockSize parameter specifies the size of this neighborhood.
+ Specifically, it defines the dimensions of the square region (of size blockSize × blockSize) centered on the pixel being processed.
+ The threshold is then calculated based on the pixel values within this region.\n
+ **✅ Acceptable Values for blockSize**\n
+ Must be an odd integer greater than 1: this ensures that the neighborhood has a central pixel.
+ Common choices: 3, 5, 7, 9, 11, 13, 15, etc.
+ Even numbers are invalid: using an even blockSize (e.g., 2, 4, 6) would result in an error because
+ there would be no central pixel in the neighborhood.\n
+ **🎯 Impact of blockSize on Thresholding**\n
+ Smaller blockSize (e.g., 3 or 5):\n
+ - Captures fine details and small variations in illumination.
+ - May be more sensitive to noise.\n
+ Larger blockSize (e.g., 15 or 21):\n
+ - Provides smoother thresholding, reducing the effect of noise.
+ - Might overlook small features or details.
+
+ Choosing the appropriate blockSize depends on the specific characteristics of your image and the details you wish to preserve or suppress.''')
+                         const = st.slider("C:", -10, 20, 0, 1,
+                                           help='''The parameter C serves as a constant subtracted from the computed mean or weighted mean of the
+ neighborhood pixels. This subtraction fine-tunes the thresholding process, allowing for better control
+ over the binarization outcome.
+ **🎯 Typical Values for C**
+ The optimal value for C varies depending on the image's characteristics, such as lighting conditions and noise levels. Commonly used values include:
+ - 2 to 10: These values are often effective for standard images with moderate lighting variations.
+ - Higher values (e.g., 15 or 20): Useful for images with significant noise or when more aggressive thresholding is needed.
+ - Negative values: Occasionally used to make the thresholding more lenient, capturing lighter details that might otherwise be missed.
+
+ It's advisable to experiment with different C values to determine the most suitable one for your specific application.''')
+
+                         c1, c2 = st.columns(2)
+                         apply_tab5b = c1.toggle("Apply", help="Click here to indicate whether the operation should be carried out or not, then validate with Confirm.", key=11)
+                         with c2:
+                             submit_tab5b = st.form_submit_button(":green[Confirm]")
+
+                         if submit_tab5b:
+                             st.session_state.adaptative_thresh = apply_tab5b
+                             st.session_state.at_max_value = max_value
+                             st.session_state.at_adaptative_method = adaptative_method
+                             st.session_state.at_thresholding_type = thresholding_type
+                             st.session_state.at_block_size = block_size
+                             st.session_state.at_const = const
+                             st.rerun()
+
+     col1_a, col1_b = col1.columns(2)
+     if col1_a.button("📃 :blue[List of operations]"):
+         col1_a.write(list_op)
+
+     if col1_b.button("Prepare download"):
+         if len(img_wrk.shape) == 2:
+             pil_img = Image.fromarray(img_wrk).convert("L")
+         else:
+             # img_wrk is already RGB (converted at load time), no BGR->RGB swap needed
+             pil_img = Image.fromarray(img_wrk)
+         img_bytes = io.BytesIO()
+         pil_img.save(img_bytes, format='PNG')
+         img_bytes.seek(0)
+         col1_b.download_button(
+             label="Download processed image",
+             data=img_bytes,
+             file_name="processed_image.png",
+             on_click="ignore",
+             icon=":material/download:",
+             mime="image/png"
+         )
+
+     with col2.expander(":blue[Quick overview of OCR recognition (with PPOCR)]", expanded=True):
+         with st.form("form1"):
+             key_ppocr_lang = st.selectbox("Choose language:", dict_lang_ppocr.keys(), 20)
+             res_cnt = st.empty()
+             submit_detect = st.form_submit_button("Launch overview")
+
+         ##----------- Process OCR --------------------------------------------------------------
+         if submit_detect:
+             with res_cnt, st.spinner("PPOCR initialization ..."):
+                 ocr = PaddleOCR(lang=dict_lang_ppocr[key_ppocr_lang]) #, show_log=False)
+             with res_cnt, st.spinner("OCR process ..."):
+                 result = ocr.ocr(img_wrk)
+                 # draw result
+                 result = result[0]
+                 if len(img_wrk.shape) == 3:
+                     image = img_wrk.copy()
+                 else:
+                     image = cv2.cvtColor(img_wrk, cv2.COLOR_GRAY2RGB)
+                 boxes = [line[0] for line in result]
+
+                 txts = [line[1][0] for line in result]
+                 scores = [line[1][1] for line in result]
+                 im_show = draw_ocr(image, boxes, txts, scores, font_path='./fonts/french.ttf')
+                 im_show = Image.fromarray(im_show)
+                 res_cnt.image(im_show, use_container_width=True)
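For quick local testing, the enhancement chain the app builds interactively (grayscale, then Gaussian blurring, then Otsu binarization) can be reproduced headlessly. A minimal sketch, assuming a local `input.png` (a placeholder path, not a file from this commit):

```python
import cv2

# One pass of the app's Grayscale -> Gaussian Blurring -> Binarization (Otsu) options
img = cv2.imread("input.png")                # cv2.imread returns BGR
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)  # sigma is computed from ksize when 0
_, binary = cv2.threshold(blurred, 0, 255,
                          cv2.THRESH_BINARY + cv2.THRESH_OTSU)
cv2.imwrite("enhanced.png", binary)
```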
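The custom-kernel text format accepted by the "Custom 2D Convolution" tab (`<coeff> * [[row], [row], ...]`) can likewise be checked standalone. A minimal sketch, with `Fraction` standing in for the app's `eval_expr` helper:

```python
import ast
from fractions import Fraction

import numpy as np

text = "1/9 * [[1, 1, 1], [1, 1, 1], [1, 1, 1]]"
coeff_txt, _, matrix_txt = text.partition("*")
# ast.literal_eval parses the matrix safely, as text_kernel_to_latex() does
kernel = float(Fraction(coeff_txt.strip())) * np.array(ast.literal_eval(matrix_txt.strip()))
print(kernel.sum())  # ~1.0 for a normalized box filter
```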
img_demo_enhance.png ADDED
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ streamlit==1.43.0
+ opencv-python-headless==4.9.0.80
+ Pillow
+ paddleocr==2.8.0
+ paddlepaddle==2.6.0
+ numpy #==1.23.4
+ imutils
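With these pinned versions installed (`pip install -r requirements.txt`), the app starts with `streamlit run app.py`. The OCR path can also be smoke-tested outside the UI; a minimal sketch, assuming the `enhanced.png` produced by the snippet above and the French model (`'fr'`, one of the codes in `out_dict_lang_ppocr`):

```python
from paddleocr import PaddleOCR

# One recognition pass, mirroring the app's "Quick overview" expander
ocr = PaddleOCR(lang="fr")
result = ocr.ocr("enhanced.png")
for box, (txt, score) in (result[0] or []):  # result[0] can be None if nothing is detected
    print(f"{score:.2f}  {txt}")
```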