Update app.py
app.py
CHANGED
@@ -78,9 +78,9 @@ def Image_Processing(filelist):
     red_edge = np.expand_dims(img_stack[:, :, 2], axis=-1)
     NIR = np.expand_dims(img_stack[:, :, -1], axis=-1)
     composite_img = np.concatenate((green, red_edge, red), axis=-1) * 255
-    gray_img, binary = CCA_Preprocess(composite_img, k=k)
+    # gray_img, binary = CCA_Preprocess(composite_img, k=k)

-    preprocessed_img = np.repeat(np.expand_dims(binary, axis=-1), 3, axis=-1) * composite_img
+    # preprocessed_img = np.repeat(np.expand_dims(binary, axis=-1), 3, axis=-1) * composite_img
     # Perform Min-Max normalization
     # new approach (line 192,193,194)
     normalized_img = (composite_img - composite_img.min()) / (composite_img.max() - composite_img.min())
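
Note on the hunk above: the per-image connected-component (CCA) step is commented out and, as the next hunks show, moved so that it runs on the stitched image instead; the composite image is now only min-max scaled. For reference, a minimal stand-alone sketch of that min-max normalization is below; the epsilon guard against a constant-valued image is an assumption added here, not something line 86 of app.py does.

import numpy as np

def min_max_normalize(img: np.ndarray, eps: float = 1e-8) -> np.ndarray:
    # Rescale values to [0, 1]; eps (assumed, not in app.py) avoids division by zero
    # when the image is a single constant value.
    rng = img.max() - img.min()
    return (img - img.min()) / (rng + eps)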
@@ -92,7 +92,11 @@ def Image_Processing(filelist):
     stitched_image = image_stitching(color_images)
     stitched_image = Image.fromarray(np.uint8(stitched_image))
     stitched_cv_image = np.array(stitched_image.convert('RGB'))[:, :, ::-1].copy()
-
+    gray_image,binary = CCA_Preprocess(stitched_cv_image,k=k)
+    preprocessed_img = np.repeat(np.expand_dims(binary, axis=-1), 3, axis=-1) * stitched_cv_image
+    normalized_preprocessed_img = (preprocessed_img - preprocessed_img.min()) / (preprocessed_img.max() - preprocessed_img.min())
+    normalized_preprocessed_img *= 255
+    normalized_preprocessed_img = normalized_preprocessed_img.astype(np.uint8)
     temp_stitched_save_path = 'temp_stitched_image.png'
     stitched_image.save(temp_stitched_save_path)
     result = detect_object(yolo_model, temp_stitched_save_path, confidence=0.128)
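
CCA_Preprocess is defined elsewhere in app.py and is not part of this diff; the call above only shows that it takes a BGR image and a count k and returns a grayscale image plus a 0/1 foreground mask. A hedged sketch of what such a helper could look like is below, using Otsu thresholding and OpenCV connected components; the signature comes from the call site, the body is purely an assumption.

import cv2
import numpy as np

def CCA_Preprocess(img_bgr, k=1):
    # Hypothetical implementation: keep the k largest connected components as foreground.
    gray = cv2.cvtColor(img_bgr.astype(np.uint8), cv2.COLOR_BGR2GRAY)
    _, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    _, labels, stats, _ = cv2.connectedComponentsWithStats(thresh, connectivity=8)
    areas = stats[1:, cv2.CC_STAT_AREA]              # component areas; label 0 is background
    keep = 1 + np.argsort(areas)[::-1][:k]           # label ids of the k largest blobs
    binary = np.isin(labels, keep).astype(np.uint8)  # 1 = foreground, 0 = background
    return gray, binary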
@@ -105,12 +109,30 @@ def Image_Processing(filelist):
     overlayed_cv_image = np.array(overlayed_image.convert('RGB'))[:, :, ::-1].copy()
     input_images = [(input_images[index],image_names[index]) for index in range(len(input_images))]
     color_images = [(cv2.cvtColor(color_images[index], cv2.COLOR_BGR2RGB),image_names[index]) for index in range(len(color_images))]
-    return input_images,color_images,[(stitched_cv_image,'Processed & Stitched Color Image'),
+    return input_images,color_images,[(stitched_cv_image,'Processed & Stitched Color Image'),
+                                      (normalized_preprocessed_img, 'Foreground Image by Connected Component Analysis'),
+                                      (overlayed_cv_image,'Foreground Image by Segmentation')]

-file_input = gr.File(file_count="multiple",
-
-
-
+file_input = gr.File(file_count="multiple",
+                     label = 'Upload Raw Input Images',
+                     show_label = True)
+gallery_raw_inputs = gr.Gallery(label = 'Input Raw Plant Images',
+                                show_label = True,
+                                height = 512,
+                                allow_preview = True,
+                                preview = True)
+gallery_color_images = gr.Gallery(label = 'Preprocessed Color Plant Images',
+                                  show_label = True,
+                                  height = 512,
+                                  allow_preview = True,
+                                  preview = True)
+gallery_output = gr.Gallery(label = 'Plant Analysis',
+                            show_label = True,
+                            height = 512,
+                            allow_preview = True,
+                            preview = True)

-iface = gr.Interface(fn = Image_Processing,
+iface = gr.Interface(fn = Image_Processing,
+                     inputs = file_input,
+                     outputs = [gallery_raw_inputs,gallery_color_images,gallery_output])
 iface.launch(share=True)
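
Each of the three values Image_Processing returns is a list of (image, caption) tuples, which is the format gr.Gallery accepts as its value. Below is a minimal, self-contained sketch of the same Gradio wiring with a stub in place of the real pipeline, handy for smoke-testing the layout; the stub function and the shorter variable names are placeholders, not part of app.py.

import gradio as gr
import numpy as np

def dummy_processing(filelist):
    # Stand-in for Image_Processing: each gallery receives a list of (image, caption) tuples.
    img = np.zeros((64, 64, 3), dtype=np.uint8)
    pair = [(img, 'placeholder')]
    return pair, pair, pair

file_input = gr.File(file_count="multiple", label='Upload Raw Input Images', show_label=True)
gallery_raw = gr.Gallery(label='Input Raw Plant Images', height=512, allow_preview=True, preview=True)
gallery_color = gr.Gallery(label='Preprocessed Color Plant Images', height=512, allow_preview=True, preview=True)
gallery_out = gr.Gallery(label='Plant Analysis', height=512, allow_preview=True, preview=True)

iface = gr.Interface(fn=dummy_processing,
                     inputs=file_input,
                     outputs=[gallery_raw, gallery_color, gallery_out])

if __name__ == "__main__":
    iface.launch()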