majogamit committed
Commit
6276dbc
1 Parent(s): 7c55879

Upload 18 files

Files changed (3)
  1. app.py +79 -66
  2. utils/__pycache__/plot.cpython-310.pyc +0 -0
  3. utils/plot.py +9 -2
app.py CHANGED
@@ -1,3 +1,4 @@
+import shutil
 import cv2
 import gradio as gr
 import pandas as pd
@@ -32,15 +33,17 @@ css = """
 # Create the Gradio interface using defined theme and CSS
 with gr.Blocks(theme=theme, css=css) as demo:
     # Title and description for the app
-    gr.Markdown("# Concrete Crack Detection and Segmentation")
-    gr.Markdown("Upload concrete crack images and get segmented results.")
+    gr.Markdown("# Concrete Crack Segmentation and Documentation")
+    gr.Markdown("Upload concrete crack images and get segmented results with pdf report.")
     with gr.Tab('Instructions'):
         gr.Markdown(
             """**Instructions for Concrete Crack Detection and Segmentation App:**
 
             **Input:**
             - Upload one or more concrete crack images using the "Image Input" section.
-            - Adjust confidence level and distance sliders if needed.\n
+            - Adjust confidence level and distance sliders if needed.
+            - Upload reference images. (e.g. whole wall with many cracks)
+            - Input Remarks (e.g. First floor wall on the left)\n
             **Buttons:**
             - Click "Segment" to perform crack segmentation.
             - Click "Clear" to reset inputs and outputs.\n
@@ -51,7 +54,7 @@ with gr.Blocks(theme=theme, css=css) as demo:
 
             **Additional Information:**
             - The app uses a YOLOv8 trained model for crack detection with 86.8\% accuracy.
-            - Results include orientation category, width of the crack (widest), number of cracks per photo.
+            - Results include orientation category, width of the crack (widest), number of cracks per photo, and damage level.
 
             **Notes:**
             - Ensure uploaded images are in the supported formats: PNG, JPG, JPEG, WEBP.
@@ -198,79 +201,89 @@ with gr.Blocks(theme=theme, css=css) as demo:
     filenames = [file.name for file in image]
     conf = conf * 0.01
     model = load_model()
-    results = model.predict(image_list, conf=conf, save=True, project='output', name=uuid, stream=True)
+
     processed_image_paths = []
     output_image_paths = []
     result_list = []
     width_list = []
     orientation_list = []
     width_interpretations = []
+    folder_name = []
     # Populate the dataframe with counts
-    for i, r in enumerate(results):
-        result_list.append(r)
-        instance_count = len(r)
-        if r.masks is not None and r.masks.data.numel() > 0:
-            masks = r.masks.data
-            boxes = r.boxes.data
-            clss = boxes[:, 5]
-            people_indices = torch.where(clss == 0)
-            people_masks = masks[people_indices]
-            people_mask = torch.any(people_masks, dim=0).int() * 255
-            processed_image_path = str(model.predictor.save_dir / f'binarize{i}.jpg')
-            cv2.imwrite(processed_image_path, people_mask.cpu().numpy())
-            processed_image_paths.append(processed_image_path)
-
-            crack_image_path = processed_image_path
-            principal_orientation, orientation_category = detect_pattern(crack_image_path)
-
-            # Print the results if needed
-            print(f"Crack Detection Results for {crack_image_path}:")
-            print("Principal Component Analysis Orientation:", principal_orientation)
-            print("Orientation Category:", orientation_category)
-
-            # Load the original image in color
-            original_img = cv2.imread(f'output/{uuid}/image{i}.jpg')
-            orig_image_path = str(model.predictor.save_dir / f'image{i}.jpg')
-            processed_image_paths.append(orig_image_path)
-            # Load and resize the binary image to match the dimensions of the original image
-            binary_image = cv2.imread(f'output/{uuid}/binarize{i}.jpg', cv2.IMREAD_GRAYSCALE)
-            binary_image = cv2.resize(binary_image, (original_img.shape[1], original_img.shape[0]))
-
-            contour_analyzer = ContourAnalyzer()
-            max_width, thickest_section, thickest_points, distance_transforms = contour_analyzer.find_contours(binary_image)
-
-            visualized_image = original_img.copy()
-            cv2.drawContours(visualized_image, [thickest_section], 0, (0, 255, 0), 1)
-
-            contour_analyzer.draw_circle_on_image(visualized_image, (int(thickest_points[0]), int(thickest_points[1])), 5, (57, 255, 20), -1)
-            print("Max Width in pixels: ", max_width)
-
-            width = contour_analyzer.calculate_width(y=10, x=5, pixel_width=max_width, calibration_factor=0.001, distance=150)
-            print("Max Width, converted: ", width)
-
-            prets = pt.classify_wall_damage(width)
-            width_interpretations.append(prets)
-
-            visualized_image_path = f'output/{uuid}/visualized_image{i}.jpg'
-            output_image_paths.append(visualized_image_path)
-            cv2.imwrite(visualized_image_path, visualized_image)
-
-            width_list.append(round(width, 2))
-            orientation_list.append(orientation_category)
-        else:
-            original_img = cv2.imread(f'output/{uuid}/image{i}.jpg')
-            visualized_image_path = f'output/{uuid}/visualized_image{i}.jpg'
-            output_image_paths.append(visualized_image_path)
-            cv2.imwrite(visualized_image_path, original_img)
-            width_list.append('None')
-            orientation_list.append('None')
-            width_interpretations.append('None')
+    for i, image_path in enumerate(image):
+        results = model.predict(image_path, conf=conf, save=True, project='output', name=f'{uuid}{i}', stream=True)
+        for r in results:
+            result_list.append(r)
+            instance_count = len(r)
+            if r.masks is not None and r.masks.data.numel() > 0:
+                masks = r.masks.data
+                boxes = r.boxes.data
+                clss = boxes[:, 5]
+                people_indices = torch.where(clss == 0)
+                people_masks = masks[people_indices]
+                people_mask = torch.any(people_masks, dim=0).int() * 255
+                processed_image_path = str(f'output/{uuid}0/binarize{i}.jpg')
+                cv2.imwrite(processed_image_path, people_mask.cpu().numpy())
+                processed_image_paths.append(processed_image_path)
+
+                crack_image_path = processed_image_path
+                principal_orientation, orientation_category = detect_pattern(crack_image_path)
+
+                # Print the results if needed
+                print(f"Crack Detection Results for {crack_image_path}:")
+                print("Principal Component Analysis Orientation:", principal_orientation)
+                print("Orientation Category:", orientation_category)
+                if i > 0:
+                    processed_image_paths.append(f'output/{uuid}{i}')
+                    # transfer item to current folder
+                    the_paths = f'output/{uuid}{i}/{os.path.basename(image_path)}'
+                    print(the_paths)
+                    shutil.copyfile(the_paths, f'output/{uuid}0/image{i}.jpg')
+                # Load the original image in color
+                original_img = cv2.imread(f'output/{uuid}0/image{i}.jpg')
+                orig_image_path = str(f'output/{uuid}0/image{i}.jpg')
+                processed_image_paths.append(orig_image_path)
+                # Load and resize the binary image to match the dimensions of the original image
+                binary_image = cv2.imread(f'output/{uuid}0/binarize{i}.jpg', cv2.IMREAD_GRAYSCALE)
+                binary_image = cv2.resize(binary_image, (original_img.shape[1], original_img.shape[0]))
+
+                contour_analyzer = ContourAnalyzer()
+                max_width, thickest_section, thickest_points, distance_transforms = contour_analyzer.find_contours(binary_image)
+
+                visualized_image = original_img.copy()
+                cv2.drawContours(visualized_image, [thickest_section], 0, (0, 255, 0), 1)
+
+                contour_analyzer.draw_circle_on_image(visualized_image, (int(thickest_points[0]), int(thickest_points[1])), 5, (57, 255, 20), -1)
+                print("Max Width in pixels: ", max_width)
+
+                width = contour_analyzer.calculate_width(y=10, x=5, pixel_width=max_width, calibration_factor=0.001, distance=150)
+                print("Max Width, converted: ", width)
+
+                prets = pt.classify_wall_damage(width)
+                width_interpretations.append(prets)
+
+                visualized_image_path = f'output/{uuid}0/visualized_image{i}.jpg'
+                output_image_paths.append(visualized_image_path)
+                cv2.imwrite(visualized_image_path, visualized_image)
+
+                width_list.append(round(width, 2))
+                orientation_list.append(orientation_category)
+            else:
+                original_img = cv2.imread(f'output/{uuid}0/image{i}.jpg')
+                visualized_image_path = f'output/{uuid}0/visualized_image{i}.jpg'
+                output_image_paths.append(visualized_image_path)
+                cv2.imwrite(visualized_image_path, original_img)
+                width_list.append('None')
+                orientation_list.append('None')
+                width_interpretations.append('None')
 
     # Delete binarized and initial segmented images after processing
     for path in processed_image_paths:
         if os.path.exists(path):
-            os.remove(path)
-
+            if os.path.isfile(path):
+                os.remove(path)
+            elif os.path.isdir(path):
+                shutil.rmtree(path)
     # results = gr.Textbox(res, visible=True)
     csv, df = pt.count_instance(result_list, filenames, uuid, width_list, orientation_list, output_image_paths, reference, remark, width_interpretations)
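The core change in app.py: instead of one batch `model.predict` call over the whole upload list, the handler now predicts one image at a time, so each image lands in its own `output/{uuid}{i}` folder, and all instance masks for a photo are collapsed into one binary image. A minimal sketch of that flow, assuming the ultralytics YOLO API (the `segment_images` wrapper and the `best.pt` weights path are illustrative, not from this repo):

```python
import cv2
import torch
from ultralytics import YOLO

def segment_images(image_paths, uuid, conf=0.5):
    model = YOLO('best.pt')  # stand-in for the app's load_model()
    for i, image_path in enumerate(image_paths):
        # One predict call per image, each saved under output/{uuid}{i}.
        results = model.predict(image_path, conf=conf, save=True,
                                project='output', name=f'{uuid}{i}', stream=True)
        for r in results:
            if r.masks is not None and r.masks.data.numel() > 0:
                # Collapse all instance masks (N, H, W) into one 0/255 image.
                mask = torch.any(r.masks.data, dim=0).int() * 255
                # Written into the first run's folder, as in the commit.
                cv2.imwrite(f'output/{uuid}0/binarize{i}.jpg',
                            mask.cpu().numpy().astype('uint8'))
```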
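Because the per-image prediction folders now sit alongside the binarized files in `processed_image_paths`, the cleanup loop has to distinguish files from directories, which is what the new `isfile`/`isdir` branch does. The pattern in isolation (the example paths are illustrative):

```python
import os
import shutil

def remove_paths(paths):
    # Delete plain files with os.remove and whole folders with shutil.rmtree.
    for path in paths:
        if not os.path.exists(path):
            continue
        if os.path.isfile(path):
            os.remove(path)      # e.g. output/<uuid>0/binarize0.jpg
        elif os.path.isdir(path):
            shutil.rmtree(path)  # e.g. output/<uuid>1/
```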
utils/__pycache__/plot.cpython-310.pyc CHANGED
Binary files a/utils/__pycache__/plot.cpython-310.pyc and b/utils/__pycache__/plot.cpython-310.pyc differ
 
utils/plot.py CHANGED
@@ -78,6 +78,8 @@ def count_instance(result, filenames, uuid, width_list, orientation_list, image_
         tuple: Path to the generated CSV and dataframe with counts.
     """
     # Initializing the dataframe
+    uuid = f'{uuid}0'
+    print(uuid)
     print(damage)
     data = {
         'Index': [],
@@ -177,8 +179,13 @@ def count_instance(result, filenames, uuid, width_list, orientation_list, image_
 
     merger.write(f'output/{uuid}/report.pdf')
     merger.close()
-    options = {'width': 1280, 'disable-smart-width': ''}
-    imgkit.from_file(f'output/{uuid}/out.html', f'output/{uuid}/out.jpg', options=opt)
+
+    paths = [f'output/{uuid}/df_batch.html', f'output/{uuid}/df_ref_summary.html',
+             f'output/{uuid}/df_ref.html', f'output/{uuid}/out.html',
+             f'output/{uuid}/report_batch.pdf', f'output/{uuid}/report_ref.pdf']
+    for path in paths:
+        if os.path.exists(path):
+            os.remove(path)
     return f'output/{uuid}/report.pdf', df
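In utils/plot.py the commit retargets `count_instance` at the `{uuid}0` folder created by the first predict call, drops the imgkit snapshot (which built an `options` dict but passed `options=opt`), and instead deletes the intermediate HTML files and partial PDFs once the merged report exists. A condensed sketch of that tail, assuming PyPDF2's `PdfMerger` for the `merger` object (the `assemble_report` function and its part list are illustrative):

```python
import os
from PyPDF2 import PdfMerger

def assemble_report(uuid, part_pdfs):
    # Merge the partial PDFs into one report, then drop the intermediates.
    merger = PdfMerger()
    for part in part_pdfs:  # e.g. report_batch.pdf, report_ref.pdf
        merger.append(part)
    report = f'output/{uuid}/report.pdf'
    merger.write(report)
    merger.close()
    for path in part_pdfs:
        if os.path.exists(path):
            os.remove(path)
    return report
```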