Spaces:
Sleeping
Sleeping
Update utils.py
Browse files
utils.py
CHANGED
@@ -9,6 +9,7 @@ from pytorch_grad_cam import GradCAM
|
|
9 |
from pytorch_grad_cam.utils.image import show_cam_on_image
|
10 |
import matplotlib.pyplot as plt
|
11 |
import textwrap
|
|
|
12 |
|
13 |
def apply_normalization(chennels):
    """Build a 2-D batch-normalization layer for the given channel count.

    NOTE(review): the parameter name "chennels" looks like a typo for
    "channels"; it is kept unchanged so keyword-argument callers keep working.
    """
    norm_layer = nn.BatchNorm2d(num_features=chennels)
    return norm_layer
|
@@ -88,6 +89,19 @@ def resize_image(image, target_size=(200, 200)):
|
|
88 |
def wrap_text(text, width=20):
    """Wrap *text* so that no output line exceeds *width* characters.

    Equivalent to ``textwrap.fill``: lines are joined with newlines;
    an empty input yields an empty string.
    """
    wrapped_lines = textwrap.wrap(text, width)
    return "\n".join(wrapped_lines)
|
90 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
91 |
def save_plot_as_image(images,texts, output_path):
|
92 |
num_images = len(images)
|
93 |
num_cols = min(4, num_images) # Assuming you want a maximum of 4 columns
|
@@ -107,9 +121,9 @@ def save_plot_as_image(images,texts, output_path):
|
|
107 |
else:
|
108 |
ax.axis('off')
|
109 |
plt.tight_layout()
|
110 |
-
# plt.savefig(
|
111 |
# plt.close()
|
112 |
-
return plt
|
113 |
|
114 |
|
115 |
# Function to run inference and return top classes
|
@@ -156,38 +170,6 @@ def get_gradcam(model,input_img, opacity,layer):
|
|
156 |
figure = save_plot_as_image(final_outputs,texts, "plot.png")
|
157 |
return figure
|
158 |
|
159 |
-
# # Function to run inference and return top classes
|
160 |
-
# def get_gradcam(model,input_img, opacity,layer):
|
161 |
-
# targets = None
|
162 |
-
# inv_normalize = transforms.Normalize(
|
163 |
-
# mean=[-0.50/0.23, -0.50/0.23, -0.50/0.23],
|
164 |
-
# std=[1/0.23, 1/0.23, 1/0.23]
|
165 |
-
# )
|
166 |
-
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
167 |
-
# transform = transforms.ToTensor()
|
168 |
-
# input_img = transform(input_img)
|
169 |
-
# input_img = input_img.to(device)
|
170 |
-
# input_img = input_img.unsqueeze(0)
|
171 |
-
# outputs = model(input_img)
|
172 |
-
# _, prediction = torch.max(outputs, 1)
|
173 |
-
# if layer == "layer3":
|
174 |
-
# target_layers = [model.convlayer3[-2]]
|
175 |
-
# if layer == "layer2":
|
176 |
-
# target_layers = [model.convlayer2[-2]]
|
177 |
-
# if layer == "layer1":
|
178 |
-
# target_layers = [model.convlayer1[-2]]
|
179 |
-
# #target_layers = [model.convlayer3[-2]]
|
180 |
-
# cam = GradCAM(model=model, target_layers=target_layers, use_cuda=False)
|
181 |
-
# grayscale_cam = cam(input_tensor=input_img, targets=targets)
|
182 |
-
# grayscale_cam = grayscale_cam[0, :]
|
183 |
-
# img = input_img.squeeze(0).to('cpu')
|
184 |
-
# img = inv_normalize(img)
|
185 |
-
# rgb_img = np.transpose(img, (1, 2, 0))
|
186 |
-
# rgb_img = rgb_img.numpy()
|
187 |
-
# visualization = show_cam_on_image(rgb_img, grayscale_cam, use_rgb=True, image_weight=opacity)
|
188 |
-
# return visualization
|
189 |
-
|
190 |
-
|
191 |
def get_misclassified_images(show_misclassified,num):
|
192 |
if show_misclassified:
|
193 |
return cv2.imread(f"missclassified_images_examples/{int(num)}.png")
|
@@ -208,27 +190,4 @@ def main_inference(num_of_output_classes,classes,model,input_img):
|
|
208 |
confidences = {classes[i]:float(out[i]) for i in range(num_of_output_classes)}
|
209 |
outputs = model(input_img)
|
210 |
_, prediction = torch.max(outputs, 1)
|
211 |
-
return confidences
|
212 |
-
# def run_inference(input_img, num_of_output_classes,transparency):
|
213 |
-
# transform = transforms.ToTensor()
|
214 |
-
# input_img = transform(input_img)
|
215 |
-
# input_img = input_img.to(device)
|
216 |
-
# input_img = input_img.unsqueeze(0)
|
217 |
-
# softmax = torch.nn.Softmax(dim=0)
|
218 |
-
# outputs = model(input_img)
|
219 |
-
# out = softmax(outputs.flatten())
|
220 |
-
# _, prediction = torch.max(outputs, 1)
|
221 |
-
# confidences = {classes[i]:float(out[i]) for i in range(num_of_output_classes)}
|
222 |
-
# target_layers = [model.convlayer3[-2]]
|
223 |
-
|
224 |
-
# cam = GradCAM(model=model, target_layers=target_layers, use_cuda=True)
|
225 |
-
# grayscale_cam = cam(input_tensor=input_img, targets=targets)
|
226 |
-
# grayscale_cam = grayscale_cam[0, :]
|
227 |
-
# img = input_img.squeeze(0).to('cpu')
|
228 |
-
# img = inv_normalize(img)
|
229 |
-
# rgb_img = np.transpose(img, (1, 2, 0))
|
230 |
-
# rgb_img = rgb_img.numpy()
|
231 |
-
# visualization = show_cam_on_image(rgb_img, grayscale_cam, use_rgb=True, image_weight=transparency)
|
232 |
-
# return confidences, rgb_img, transparency,grayscale_cam
|
233 |
-
|
234 |
-
|
|
|
9 |
from pytorch_grad_cam.utils.image import show_cam_on_image
|
10 |
import matplotlib.pyplot as plt
|
11 |
import textwrap
|
12 |
+
import io
|
13 |
|
14 |
def apply_normalization(chennels):
|
15 |
return nn.BatchNorm2d(chennels)
|
|
|
89 |
def wrap_text(text, width=20):
|
90 |
return textwrap.fill(text, width)
|
91 |
|
92 |
+
import io
|
93 |
+
# Rasterize a matplotlib figure to an in-memory PNG and return it as an array.
def get_img_from_fig(fig, dpi=180):
    """Render *fig* to PNG in memory and return it as an RGB numpy array.

    Args:
        fig: Any object exposing ``savefig`` — a matplotlib ``Figure``, or
            the ``pyplot`` module itself as this file's caller passes.
        dpi: Rasterization resolution forwarded to ``savefig``.

    Returns:
        numpy.ndarray: decoded image with RGB channel order.
    """
    # Context manager guarantees the buffer is released even if savefig
    # raises (the original leaked it on error). getvalue() reads the whole
    # buffer regardless of position, so no seek(0) is needed.
    with io.BytesIO() as buf:
        fig.savefig(buf, format="png", dpi=dpi)
        img_arr = np.frombuffer(buf.getvalue(), dtype=np.uint8)
    # cv2 decodes to BGR; convert to RGB for downstream display.
    img = cv2.imdecode(img_arr, 1)
    return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
|
104 |
+
|
105 |
def save_plot_as_image(images,texts, output_path):
|
106 |
num_images = len(images)
|
107 |
num_cols = min(4, num_images) # Assuming you want a maximum of 4 columns
|
|
|
121 |
else:
|
122 |
ax.axis('off')
|
123 |
plt.tight_layout()
|
124 |
+
# plt.savefig("tmp_arrays.png")
|
125 |
# plt.close()
|
126 |
+
return get_img_from_fig(plt)
|
127 |
|
128 |
|
129 |
# Function to run inference and return top classes
|
|
|
170 |
figure = save_plot_as_image(final_outputs,texts, "plot.png")
|
171 |
return figure
|
172 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
173 |
def get_misclassified_images(show_misclassified,num):
|
174 |
if show_misclassified:
|
175 |
return cv2.imread(f"missclassified_images_examples/{int(num)}.png")
|
|
|
190 |
confidences = {classes[i]:float(out[i]) for i in range(num_of_output_classes)}
|
191 |
outputs = model(input_img)
|
192 |
_, prediction = torch.max(outputs, 1)
|
193 |
+
return confidences
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|