Spaces: Build error
Commit · 598166e
1 Parent(s): 349bd07
update thresholding methods

Files changed:
- .vscode/launch.json +16 -0
- __pycache__/Thresholding_interface.cpython-310.pyc +0 -0
- __pycache__/threshold.cpython-310.pyc +0 -0
- __pycache__/threshold_methods.cpython-310.pyc +0 -0
- app.py +23 -43
- huggingface.png +0 -0
- image_0.png +0 -0
- threshold_methods.py +303 -0
.vscode/launch.json
ADDED
@@ -0,0 +1,16 @@
+{
+    // Use IntelliSense to learn about possible attributes.
+    // Hover to view descriptions of existing attributes.
+    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
+    "version": "0.2.0",
+    "configurations": [
+        {
+            "name": "Python: Current File",
+            "type": "python",
+            "request": "launch",
+            "program": "${file}",
+            "console": "integratedTerminal",
+            "justMyCode": true
+        }
+    ]
+}
__pycache__/Thresholding_interface.cpython-310.pyc
ADDED
Binary file (5.82 kB).
__pycache__/threshold.cpython-310.pyc
ADDED
Binary file (5.81 kB).
__pycache__/threshold_methods.cpython-310.pyc
ADDED
Binary file (3.36 kB).
app.py
CHANGED
@@ -1,50 +1,30 @@
 import gradio as gr
+from threshold_methods import threshold_methods
 import cv2
-import requests
-import os
-#pirahansiah/ComputerVision
-from ultralytics import YOLO
 
-
-
+new_outputs = [
+    gr.outputs.Image(type="numpy", label="Output Image")
 ]
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-def show_preds_image(image_path):
-    image = cv2.imread(image_path,0)
-    return image #cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
-
-inputs_image = [
-    gr.components.Image(type="filepath", label="Input Image"),
-]
-outputs_image = [
-    gr.components.Image(type="numpy", label="Output Image"),
-]
-interface_image = gr.Interface(
-    fn=show_preds_image,
-    inputs=inputs_image,
-    outputs=outputs_image,
-    title="Computer Vision and Deep Learning by Farshid PirahanSiah",
-    examples=path,
-    cache_examples=False,
+def show_image():
+    img = cv2.imread('huggingface.png')
+    return img
+
+HuggingFace = gr.Interface(
+    fn=show_image,
+    live=True,
+    inputs=[],
+    outputs=new_outputs,
+    hide_controls=True,
+    hide_inputs=True,
+    show_submit_buttom=False,
+    show_clear=False,
+    show_generate=False,
+    allow_flagging=False,
+    title="https://huggingface.co/spaces/pirahansiah/ComputerVision",
 )
 
 gr.TabbedInterface(
-    [
-    tab_names=['Image
-    ).queue().launch(
+    [HuggingFace,threshold_methods],
+    tab_names=['HuggingFace','Thresholding Image Segmentation']
+).queue().launch()
+
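The "Build error" status at the top of this commit is most likely caused by the new app.py: keyword arguments such as hide_controls, hide_inputs, show_submit_buttom, show_clear, and show_generate are not documented parameters of gr.Interface, so constructing the HuggingFace interface probably fails before the Space can start. A minimal sketch of the same two-tab layout using only arguments known to exist is shown below; it assumes Gradio 3.x, assumes threshold_methods.py is cleaned up the same way (its on_change argument is likewise not a Gradio parameter), and takes the file path, title, and tab names from the diff above. It is not part of the commit itself.

# Hedged sketch, not part of the commit: the same two-tab app rewritten with
# only gr.Interface arguments documented in Gradio 3.x.
import cv2
import gradio as gr
from threshold_methods import threshold_methods  # Interface defined by this commit

def show_image():
    # 'huggingface.png' is the banner image added in this commit.
    return cv2.cvtColor(cv2.imread("huggingface.png"), cv2.COLOR_BGR2RGB)

HuggingFace = gr.Interface(
    fn=show_image,
    inputs=[],
    outputs=gr.Image(type="numpy", label="Output Image"),
    allow_flagging="never",
    title="https://huggingface.co/spaces/pirahansiah/ComputerVision",
)

gr.TabbedInterface(
    [HuggingFace, threshold_methods],
    tab_names=["HuggingFace", "Thresholding Image Segmentation"],
).queue().launch()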
huggingface.png
ADDED
image_0.png
ADDED
threshold_methods.py
ADDED
@@ -0,0 +1,303 @@
+import cv2
+import numpy as np
+import gradio as gr
+
+
+def pirahansiah_threshold_method_find_threshold_values_2(grayImg):
+    #http://www.jatit.org/volumes/Vol95No21/1Vol95No21.pdf
+    #https://pdfs.semanticscholar.org/05b2/d39fce4e8a99897e95f8c75416f65a5a0acc.pdf
+    assert grayImg is not None, "file could not be read, check with os.path.exists()"
+    #img = cv2.GaussianBlur(self.grayImg, (3, 3), 0)
+    img = grayImg
+    # Initialize an array to store the PSNR values for each threshold value
+    psnr_values = np.zeros(256)
+    psnr_max=0
+    th=0
+    # Iterate over all possible threshold values with a step size of 5
+    for t in range(0, 256, 5):
+        # Threshold the image using the current threshold value
+        _, binary = cv2.threshold(img, t, 255, cv2.THRESH_BINARY)
+        # Calculate the PSNR between the binary image and the original image
+        psnr = cv2.PSNR(binary, img)
+        # Store the PSNR value
+        psnr_values[t] = psnr
+        if (psnr_max<psnr):
+            psnr_max=psnr
+            th=t
+    # Calculate the mean PSNR value
+    mean_psnr = np.mean(psnr_values)
+    th=int(th/mean_psnr)
+    # Find the threshold values that satisfy the condition
+    thresh = th #np.argwhere((mean_psnr / k1 < psnr_values) & (psnr_values < mean_psnr / k2)).flatten()
+
+    return thresh
+def pirahansiah_threshold_method_find_threshold_values_1(grayImg):
+    #https://www.jatit.org/volumes/Vol57No2/4Vol57No2.pdf
+    assert grayImg is not None, "file could not be read, check with os.path.exists()"
+    gray = cv2.GaussianBlur(grayImg, (3, 3), 0)
+    max1=0
+    max2=0
+    # Iterate over all possible threshold values
+    for t in range(0, 256, 10):
+        # Threshold the image using the current threshold value
+        _, binary = cv2.threshold(gray, t, 255, cv2.THRESH_BINARY)
+        # Find the contours in the binary image
+        contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
+        if max1<=len(contours):
+            max1=len(contours)
+            max2=t
+    threshold_values =max2
+    return threshold_values
+
+
+
+path = [['image_0.png']]
+inputs_thresh = [
+    gr.inputs.Image(type="filepath", label="Input Image"),
+    gr.components.Slider(label="Manual Threshold Value", value=125, minimum=10, maximum=255, step=5),
+    gr.inputs.Radio(label="Threshold Methods",
+        choices=[
+            "cv2.threshold(grayImg, 128, 255, cv2.THRESH_BINARY)"
+            ,"cv2.threshold(grayImg, 128, 255, cv2.THRESH_BINARY_INV)"
+            ,"cv2.threshold(grayImg, 128, 255, cv2.THRESH_TRUNC)"
+            ,"cv2.threshold(grayImg, 128, 255, cv2.THRESH_TOZERO)"
+            ,"cv2.threshold(grayImg, 128, 255, cv2.THRESH_TOZERO_INV)"
+            ,"cv2.adaptiveThreshold(grayImg, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 2)"
+            ,"cv2.threshold(grayImg, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU,)"
+            ,"Adapted from PirahanSiah Threshold Method I derivative demo"
+            ,"Inspired by PirahanSiah Threshold Method II derivative demo"
+        ]),
+]
+
+outputs_thresh = [
+    gr.outputs.Image(type="numpy", label="Output Image")
+]
+
+def process_image(input_image, slider_val, radio_choice):
+    img = cv2.imread(input_image,0)
+    _, binaryImg = cv2.threshold(img, slider_val, 255, cv2.THRESH_BINARY)
+
+    if radio_choice == "cv2.threshold(grayImg, 128, 255, cv2.THRESH_BINARY)":
+        _, binaryImg=cv2.threshold(img, 128, 255, cv2.THRESH_BINARY)
+    elif radio_choice == "cv2.threshold(grayImg, 128, 255, cv2.THRESH_BINARY_INV)":
+        _, binaryImg=cv2.threshold(img, 128, 255, cv2.THRESH_BINARY_INV)
+    elif radio_choice == "cv2.threshold(grayImg, 128, 255, cv2.THRESH_TRUNC)":
+        _, binaryImg=cv2.threshold(img, 128, 255, cv2.THRESH_TRUNC)
+    elif radio_choice == "cv2.threshold(grayImg, 128, 255, cv2.THRESH_TOZERO)":
+        _, binaryImg=cv2.threshold(img, 128, 255, cv2.THRESH_TOZERO)
+    elif radio_choice == "cv2.threshold(grayImg, 128, 255, cv2.THRESH_TOZERO_INV)":
+        _, binaryImg=cv2.threshold(img, 128, 255, cv2.THRESH_TOZERO_INV)
+    elif radio_choice == "cv2.adaptiveThreshold(grayImg, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 2)":
+        binaryImg=cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 2)
+    elif radio_choice == "cv2.threshold(grayImg, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU,)":
+        _, binaryImg=cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU,)
+    elif radio_choice == "Adapted from PirahanSiah Threshold Method I derivative demo":
+        threshval=pirahansiah_threshold_method_find_threshold_values_1(img)
+        _, binaryImg = cv2.threshold(img, threshval, 255, cv2.THRESH_BINARY)
+    elif radio_choice == "Inspired by PirahanSiah Threshold Method II derivative demo":
+        threshval=pirahansiah_threshold_method_find_threshold_values_2(img)
+        _, binaryImg = cv2.threshold(img, threshval, 255, cv2.THRESH_BINARY)
+    return binaryImg
+
+def on_change(slider_val, radio_choice):
+    # Update output
+    outputs_thresh[0].update(process_image(
+        inputs_thresh[0].value,
+        slider_val,
+        radio_choice)
+    )
+
+
+threshold_methods = gr.Interface(
+    fn=process_image,
+    inputs=inputs_thresh,
+    outputs=outputs_thresh,
+    on_change=on_change,
+    examples=path,
+    title="Computer Vision and Deep Learning by Farshid PirahanSiah",
+    live=True
+)
+
+
+
+
+
+# class Thresholding:
+#     def __init__(self, grayImg):
+#         self.grayImg = grayImg
+
+#     def manually_python(self):
+#         threshval = 128
+#         binaryImg = np.where(self.grayImg < threshval, self.grayImg, 0) if threshval < 128 else np.where(self.grayImg > threshval, self.grayImg, 0)
+#         return binaryImg
+
+#     def manually(self,threshval):
+#         binaryImg = np.zeros_like(self.grayImg)
+#         for i in range(self.grayImg.shape[0]): #height
+#             for j in range(self.grayImg.shape[1]): #width
+#                 if self.grayImg[i, j] < threshval:
+#                     binaryImg[i, j] = 0
+#                 else:
+#                     binaryImg[i, j] = 1
+#         return binaryImg
+
+#     def pirahansiah_threshold_method_find_threshold_values_2(self):
+#         #http://www.jatit.org/volumes/Vol95No21/1Vol95No21.pdf
+#         #https://pdfs.semanticscholar.org/05b2/d39fce4e8a99897e95f8c75416f65a5a0acc.pdf
+#         assert self.grayImg is not None, "file could not be read, check with os.path.exists()"
+#         #img = cv2.GaussianBlur(self.grayImg, (3, 3), 0)
+#         img = self.grayImg
+#         # Initialize an array to store the PSNR values for each threshold value
+#         psnr_values = np.zeros(256)
+#         psnr_max=0
+#         th=0
+#         # Iterate over all possible threshold values with a step size of 5
+#         for t in range(0, 256, 5):
+#             # Threshold the image using the current threshold value
+#             _, binary = cv2.threshold(img, t, 255, cv2.THRESH_BINARY)
+#             # Calculate the PSNR between the binary image and the original image
+#             psnr = cv2.PSNR(binary, img)
+#             # Store the PSNR value
+#             psnr_values[t] = psnr
+#             if (psnr_max<psnr):
+#                 psnr_max=psnr
+#                 th=t
+#         # Calculate the mean PSNR value
+#         mean_psnr = np.mean(psnr_values)
+#         th=int(th/mean_psnr)
+#         # Find the threshold values that satisfy the condition
+#         thresh = th #np.argwhere((mean_psnr / k1 < psnr_values) & (psnr_values < mean_psnr / k2)).flatten()
+
+#         return thresh
+#     def pirahansiah_threshold_method_find_threshold_values_1(self):
+#         #https://www.jatit.org/volumes/Vol57No2/4Vol57No2.pdf
+#         assert self.grayImg is not None, "file could not be read, check with os.path.exists()"
+#         gray = cv2.GaussianBlur(self.grayImg, (3, 3), 0)
+#         max1=0
+#         max2=0
+#         # Iterate over all possible threshold values
+#         for t in range(0, 256, 10):
+#             # Threshold the image using the current threshold value
+#             _, binary = cv2.threshold(gray, t, 255, cv2.THRESH_BINARY)
+#             # Find the contours in the binary image
+#             contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
+#             if max1<=len(contours):
+#                 max1=len(contours)
+#                 max2=t
+#         threshold_values =max2
+#         return threshold_values
+
+
+#     def opencv_th(self):
+#         font = cv2.FONT_HERSHEY_SIMPLEX
+#         fontScale = 2
+#         color = (0, 0, 0)
+#         colorInv = (255, 255, 255)
+#         thickness = 2
+#         # Set the position of the text
+#         textX = 25
+#         textY = 45
+#         textSize, _ = cv2.getTextSize("Otsu Method ", font, fontScale, thickness)
+#         # Draw a white rectangle behind the text
+
+#         # Apply different thresholding methods
+#         _, binaryImg = cv2.threshold(self.grayImg, 128, 255, cv2.THRESH_BINARY)
+#         cv2.rectangle(binaryImg, (textX, textY - textSize[1]), (textX + textSize[0], textY), (255, 255, 255), -1)
+#         cv2.putText(binaryImg, 'Binary', (textX, textY), font, fontScale, color, thickness)
+
+#         _, binaryInvImg = cv2.threshold(self.grayImg, 128, 255, cv2.THRESH_BINARY_INV)
+#         cv2.rectangle(binaryInvImg, (textX, textY - textSize[1]), (textX + textSize[0], textY), (255, 255, 255), -1)
+#         cv2.putText(binaryInvImg, 'Binary Inv', (textX, textY), font, fontScale, color, thickness)
+
+#         _, truncImg = cv2.threshold(self.grayImg, 128, 255, cv2.THRESH_TRUNC)
+#         cv2.rectangle(truncImg, (textX, textY - textSize[1]), (textX + textSize[0], textY), (255, 255, 255), -1)
+#         cv2.putText(truncImg, 'Trunc', (textX, textY), font, fontScale, color, thickness)
+
+#         _, toZeroImg = cv2.threshold(self.grayImg, 128, 255, cv2.THRESH_TOZERO)
+#         cv2.rectangle(toZeroImg, (textX, textY - textSize[1]), (textX + textSize[0], textY), (255, 255, 255), -1)
+#         cv2.putText(toZeroImg, 'To Zero', (textX, textY), font, fontScale, color, thickness)
+
+#         _, toZeroInvImg = cv2.threshold(self.grayImg, 128, 255, cv2.THRESH_TOZERO_INV)
+#         cv2.rectangle(toZeroInvImg, (textX, textY - textSize[1]), (textX + textSize[0], textY), (255, 255, 255), -1)
+#         cv2.putText(toZeroInvImg, 'To Zero Inv', (textX, textY), font, fontScale, color, thickness)
+
+#         adaptiveImg = cv2.adaptiveThreshold(self.grayImg, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 2)
+#         cv2.rectangle(adaptiveImg, (textX, textY - textSize[1]), (textX + textSize[0], textY), (255, 255, 255), -1)
+#         cv2.putText(adaptiveImg, 'Adaptive', (textX, textY), font, fontScale, color, thickness)
+
+#         otsu_threshold, image_result = cv2.threshold(self.grayImg, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU,)
+#         cv2.rectangle(image_result, (textX, textY - textSize[1]), (textX + textSize[0], textY), (255, 255, 255), -1)
+#         cv2.putText(image_result, 'Otsu Threshold Method', (textX, textY), font, fontScale, color, thickness)
+
+#         threshval=self.pirahansiah_threshold_method_find_threshold_values_1()
+#         th_img = th.manually(threshval)
+#         binaryImg = th_img * 255
+#         cv2.rectangle(binaryImg, (textX, textY - textSize[1]), (textX + textSize[0], textY), (255, 255, 255), -1)
+#         cv2.putText(binaryImg, 'PirahanSiah Threshold ', (textX, textY), font, fontScale, color, thickness)
+
+
+#         cv2.rectangle(self.grayImg, (textX, textY - textSize[1]), (textX + textSize[0], textY), (255, 255, 255), -1)
+#         cv2.putText(self.grayImg, 'Original', (textX, textY), font, fontScale, color, thickness)
+#         # Concatenate the images into a grid with 3 rows and 3 columns
+#         row1 = np.concatenate((self.grayImg, binaryImg, binaryInvImg), axis=1)
+#         row2 = np.concatenate((truncImg, toZeroImg, toZeroInvImg), axis=1)
+#         row3 = np.concatenate((adaptiveImg, image_result, binaryImg), axis=1) # np.zeros_like(adaptiveImg)
+#         concatenatedImg = np.concatenate((row1, row2, row3), axis=0)
+#         # Resize the concatenated image to fit the screen resolution
+#         screenRes = (1920-200, 1080-200)
+#         scaleWidth = screenRes[0] / concatenatedImg.shape[1]
+#         scaleHeight = screenRes[1] / concatenatedImg.shape[0]
+#         scale = min(scaleWidth, scaleHeight)
+#         windowWidth = int(concatenatedImg.shape[1] * scale)
+#         windowHeight = int(concatenatedImg.shape[0] * scale)
+#         resizedImg = cv2.resize(concatenatedImg, (windowWidth, windowHeight))
+#         # Display the resized image
+#         cv2.imshow('Thresholded Images', resizedImg)
+#         cv2.waitKey(0)
+#         cv2.destroyAllWindows()
+
+
+# if __name__ == "__main__":
+#     img = cv2.imread("opencv.png")
+#     gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+#     th = Thresholding(gray)
+#     th.opencv_th()
+
+#threshval = 128
+# read an image
+# convert it to grayscale
+#threshval=th.pirahansiah_threshold_method_find_threshold_values_1()
+# threshval=th.pirahansiah_threshold_method_find_threshold_values_2()
+# th_img = th.manually(threshval)
+# binaryImg = th_img * 255
+# cv2.imshow('image', binaryImg)
+# cv2.waitKey(100)
+
+
+'''
+cv::Mat binaryImg = threshval < 128 ? (grayImg < threshval) : (grayImg > threshval);
+#thresholding #opencv #python
+
+The PirahanSiah’s method for thresholding, described in the paper, uses a gray-scale histogram,
+thresholding range, and the Peak Signal-to-Noise Ratio (PSNR) to segment images and find the best
+threshold values to binarize the image. They argue that thresholding is an important problem in
+pattern recognition and use the PSNR quality measure to assess the similarities between the
+original and binarized image. They calculate PSNRs for every threshold value and use the
+difference between the PSNR of the previous threshold image and the new one to select the
+threshold value. They also describe a multi-threshold algorithm that applies multiple
+threshold values and computes the total number of blobs or objects in an image for each threshold.
+The peak threshold values are those with the highest total number of blobs compared to their threshold neighbors.
+In addition, their method uses thresholding on images suitable for OCR systems, LPR systems, etc.
+
+The proposed adaptive threshold method, based on the Peak Signal-to-Noise Ratio (PSNR),
+has the potential to be applied in all domains, such as LPR and OCR. The proposed algorithm
+achieves competitive results in four databases, including Malaysian vehicle, standard,
+printed and handwritten images. The objective of this research was to develop a new single
+adaptive thresholding algorithm that works for a wide range of pattern recognition applications.
+The proposed method has been implemented in four different types of applications and compared
+with other methods. The results show that the proposed algorithm achieves the objective because
+it has obtained reasonable results in all four areas/domains.
+https://www.jatit.org/volumes/Vol57No2/4Vol57No2.pdf
+http://www.jatit.org/volumes/Vol95No21/1Vol95No21.pdf
+https://pdfs.semanticscholar.org/05b2/d39fce4e8a99897e95f8c75416f65a5a0acc.pdf
+'''
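The docstring at the end of threshold_methods.py describes the PSNR-based selection that pirahansiah_threshold_method_find_threshold_values_2 implements: binarize the image at each candidate threshold, measure how close the result stays to the original with PSNR, and keep the best candidate. Below is a compact standalone sketch of that sweep, not part of the commit; it is simplified in that it returns the max-PSNR threshold directly, whereas the committed function additionally divides the winning value by the mean PSNR, and the function name and use of image_0.png are illustrative only.

# Hedged sketch of the PSNR sweep described in the docstring above.
import cv2
import numpy as np

def psnr_threshold_sweep(gray, step=5):
    # Try every candidate threshold and keep the one whose binary image
    # has the highest PSNR against the original grayscale image.
    best_t, best_psnr = 0, -np.inf
    for t in range(0, 256, step):
        _, binary = cv2.threshold(gray, t, 255, cv2.THRESH_BINARY)
        psnr = cv2.PSNR(binary, gray)
        if psnr > best_psnr:
            best_psnr, best_t = psnr, t
    return best_t

if __name__ == "__main__":
    gray = cv2.imread("image_0.png", 0)   # example image added in this commit
    t = psnr_threshold_sweep(gray)
    _, binary = cv2.threshold(gray, t, 255, cv2.THRESH_BINARY)
    cv2.imwrite("image_0_binary.png", binary)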