isunnyrock committed on
Commit
ebc79d8
1 Parent(s): 6bb9cf8

Display updates to app file

Files changed (1)
  1. app.py +103 -134
app.py CHANGED
@@ -51,7 +51,7 @@ import tensorflow_hub as hub
 
 
 @st.cache_resource
-def load_models_etc():
+def load_models():
     #OpenAI elements
     #secrets = toml.load(".vscode/streamlit/secrets.toml")
     #client_d = OpenAI(api_key = secrets["OPENAI_API_KEY"])
@@ -94,7 +94,8 @@ def load_models_etc():
     return client_d,detector_d,Dis_percentage_d,Details_d,cnn_model_d,xception_model_d,mobilenet_model_d,class_labels_d
 
 
-client,detector,Dis_percentage,Details,cnn_model,xception_model,mobilenet_model,class_labels = load_models_etc()
+# Loading the models. load_models() is cached, so it runs only once during the initial boot.
+client,detector,Dis_percentage,Details,cnn_model,xception_model,mobilenet_model,class_labels = load_models()
 
 
 # Identify extent of spot or lesion coverage on leaf
@@ -126,7 +127,7 @@ def identify_spots_or_lesions(img):
     contoured_image = cv2.drawContours(cv_image.copy(), filtered_contours, -1, (0, 255, 0), 1)
 
     # Visualization
-    plt.figure(figsize=(25, 8))
+    mfig = plt.figure(figsize=(25, 8))
     plt.subplot(1, 5, 1)
     plt.imshow(cv2.cvtColor(np.array(image), cv2.COLOR_BGR2RGB))
     plt.title('Original Image')
@@ -146,7 +147,9 @@ def identify_spots_or_lesions(img):
     plt.subplot(1, 5, 5)
     plt.imshow(cv2.cvtColor(contoured_image, cv2.COLOR_BGR2RGB))
     plt.title('Spots or Lesions Identified')
-    plt.show()
+    #plt.show()
+    st.pyplot(mfig)
+
     return(percentage_spots)
 
 
@@ -216,12 +219,6 @@ def classify_image(image):
     # preprocess_input from Xception to scale the image to -1 to +1
     #img_array = preprocess_input(img_array)
 
-    # Perform inference using the TensorFlow Lite model
-    #interpreter.set_tensor(interpreter.get_input_details()[0]['index'], img_array)
-    #interpreter.invoke()
-    #prediction = interpreter.get_tensor(interpreter.get_output_details()[0]['index'])
-
-
     mobilenet_input = mobilenet_preprocess(np.copy(img_batch))
     xception_input = xception_preprocess(np.copy(img_batch))
     cnn_input = img_batch / 255.0  # normalization for generic CNN model
@@ -231,7 +228,6 @@ def classify_image(image):
     xception_preds = xception_model(xception_input, training = False)
     cnn_preds = cnn_model(cnn_input, training = False)
 
-
     # Get the most likely class index from predictions
     mobilenet_class = np.argmax(mobilenet_preds, axis=1)
     xception_class = np.argmax(xception_preds, axis=1)
@@ -250,14 +246,10 @@ def classify_image(image):
     second_class_prob = averaged_probs_np[np.arange(top_class_index.size), second_class_index]
     predicted_class_name = class_labels[top_class_index[0]]
     second_class_name = class_labels[second_class_index[0]]
-
     # --------------------------------
 
-    if top_class_prob[0] < 0.999:  # threshold close to 1 to handle floating-point precision issues
-        st.write("Second predicted class:", second_class_name)
-        st.write(f"Second class confidence: {second_class_prob[0]:.3%}")
-    else:
-        st.write("Second predicted class: None")
+    st.write("Image class:", predicted_class_name)
+    st.write(f"Confidence: {top_class_prob[0]:.2%}")
 
     if "healthy" in predicted_class_name:
         st.write(f"{predicted_class_name} is healthy, skipping further analysis.")
@@ -269,7 +261,6 @@ def classify_image(image):
     else:
         spots_percentage = identify_spots_or_lesions(image)
 
-
         if predicted_class_name in Dis_percentage['Plant'].values:
             row = Dis_percentage.loc[Dis_percentage['Plant'] == predicted_class_name].iloc[0]
             severity_disease = plot_dis_percentage(row, spots_percentage)
@@ -283,15 +274,96 @@ def classify_image(image):
             return severity_disease, top_class_prob[0], second_class_name
         else:
             st.write("No data available for this plant disease in DataFrame.")
-            return
 
+    if top_class_prob[0] < 0.999:  # threshold close to 1 to handle floating-point precision issues
+        st.write("Second predicted class:", second_class_name)
+        st.write(f"Second class confidence: {second_class_prob[0]:.3%}")
+    else:
+        st.write("Second predicted class: None")
+
+    return
+
+
+def display_image(image):
+    fig = plt.figure(figsize=(12, 6))
+    plt.grid(False)
+    plt.imshow(image)
+    plt.show()
 
 
 
+def draw_bounding_box_on_image(image, ymin, xmin, ymax, xmax, color, font, thickness=4, display_str_list=()):
+    """Adds a bounding box to an image."""
+    draw = ImageDraw.Draw(image)
+    im_width, im_height = image.size
+    (left, right, top, bottom) = (xmin * im_width, xmax * im_width, ymin * im_height, ymax * im_height)
+    draw.line([(left, top), (left, bottom), (right, bottom), (right, top), (left, top)], width=thickness, fill=color)
+
+    # If the total height of the display strings added to the top of the bounding
+    # box exceeds the top of the image, stack them below the box instead:
+    display_str_heights = [font.getbbox(ds)[3] for ds in display_str_list]
+    total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)
+
+    if top > total_display_str_height:
+        text_bottom = top
+    else:
+        text_bottom = top + total_display_str_height
+    # Reverse list and print from bottom to top.
+    for display_str in display_str_list[::-1]:
+        bbox = font.getbbox(display_str)
+        text_width, text_height = bbox[2], bbox[3]
+        margin = np.ceil(0.05 * text_height)
+        draw.rectangle([(left, text_bottom - text_height - 2 * margin), (left + text_width, text_bottom)], fill=color)
+        draw.text((left + margin, text_bottom - text_height - margin), display_str, fill="black", font=font)
+        text_bottom -= text_height - 2 * margin
+
+
+
+def draw_boxes(image, boxes, class_names, scores, max_boxes=3, min_score=0.1):
+    """Overlay labeled boxes on an image with formatted scores and label names."""
+    colors = list(ImageColor.colormap.values())
+
+    font = ImageFont.load_default()
+
+    # Prepare a list of all detections that meet the score threshold
+    filtered_boxes = [(boxes[i], scores[i], class_names[i]) for i in range(len(scores)) if scores[i] >= min_score]
+
+    # Sort detections based on scores in descending order
+    filtered_boxes.sort(key=lambda x: x[1], reverse=True)
+
+    # Process each box to draw (limited by max_boxes)
+    for i, (box, score, class_name) in enumerate(filtered_boxes[:max_boxes]):
+        ymin, xmin, ymax, xmax = tuple(box)
+        display_str = "{}: {:.2f}%".format(class_name.decode("ascii"), score * 100)
+        color = colors[hash(class_name) % len(colors)]
+        draw_bounding_box_on_image(image, ymin, xmin, ymax, xmax, color, font, display_str_list=[display_str])
+
+    # Convert PIL Image back to numpy array for display (if necessary)
+    return np.array(image) if isinstance(image, Image.Image) else image
+
+
+
+
+# ----------------------------------------------------------------------------------------------------//
 # Streamlit app
 
-tab1, tab2, tab3 = st.tabs(["Home", "Solution", "Team"])
 
+def openai_remedy(searchval):
+    completion = client.chat.completions.create(
+        model="gpt-4-turbo",
+        messages=[
+            {"role": "user", "content": "List out the most relevant remediation steps for " + searchval + " in 7 bullet points"}
+        ],
+        temperature=0.1,
+        max_tokens=2000,
+        top_p=0.1
+    )
+    st.markdown(completion.choices[0].message.content)
+    #st.markdown(completion.choices[0].delta.content)
+    return
+
+
+tab1, tab2, tab3 = st.tabs(["Home", "Solution", "Team"])
 
 
 #First Tab: Title of Application and description
@@ -315,20 +387,17 @@ Early detection of plant diseases is paramount for farmers to protect their crop
 
 
 
-
 #Second Tab: Image upload and disease detection and remedy suggestions
with tab2:
     st.title("Plant classification, Disease detection and management")
-
     # Load and display the image
     uploaded_file = st.file_uploader("Upload Leaf Image...", type=["jpg", "jpeg", "png"], key="uploader")
 
-
     if uploaded_file is not None:
-
         print("Image successfully uploaded!")
         # Read the uploaded image file
-        st.image(uploaded_file, caption='Uploaded Image', use_column_width=True)
+        #st.image(uploaded_file, caption='Uploaded Image', use_column_width=True,width=100)
+        st.image(uploaded_file, caption='Uploaded Image', width=300)
         image = Image.open(uploaded_file)
 
         image_for_drawing = image.copy()
@@ -344,22 +413,24 @@ with tab2:
         result = {key: value.numpy() for key, value in result.items()}
 
         #st.write("Found %d objects." % len(result["detection_scores"]))
-        st.write("Inference time: ", end_time - start_time)
+        #st.write("Inference time: ", end_time - start_time)
 
         detection_scores = result["detection_scores"]
         detection_class_entities = result["detection_class_entities"]
 
-        #image_with_boxes = draw_boxes(image_for_drawing, result["detection_boxes"],detection_class_entities, detection_scores)
-
+        # Class Detections displays
+        image_with_boxes = draw_boxes(image_for_drawing, result["detection_boxes"], detection_class_entities, detection_scores)
         #display_image(image_with_boxes)
+        st.image(image_with_boxes, caption='Uploaded Image', width=300)
 
         top_3_idx = np.argsort(-detection_scores)[:3]
 
         for idx in top_3_idx:
             entity = detection_class_entities[idx].decode('utf-8')
+
             if "Plant" == entity:
                 plant_score = detection_scores[idx]
-                st.write(f"Plant probability score: {plant_score:.2%}")
+                st.write(f"Plant probability score from the Faster R-CNN Inception ResNet V2 object detection model: {plant_score:.2%}")
                 result1 = classify_image(image)
 
                 if result1 is not None:
@@ -367,22 +438,12 @@ with tab2:
                     new1 = result1[0] + ""
                     newresult = new1.replace("_"," ")
                     newresult2 = newresult.replace("-"," ")
-                    st.markdown("Searching disease management steps for " + ":red[" + newresult2 + "]... :eyes:")
-                    completion = client.chat.completions.create(
-                        model="gpt-4-turbo",
-                        messages=[
-                            {"role": "user", "content": "List out the most relevant remediation steps for " + newresult2 + " in 7 bullet points"}
-                        ],
-                        temperature=0.1,
-                        max_tokens=2000,
-                        top_p=0.1
-                    )
-                    st.markdown(completion.choices[0].message.content)
-                    #st.markdown(completion.choices[0].delta.content)
+                    st.markdown("Fetching disease management steps for " + ":red[" + newresult2 + "]... :eyes:")
+                    openai_remedy(newresult2)
+
     else:
         print("No file uploaded.")
 
-
     # Disclaimer
     st.write("""
 ### Disclaimer
@@ -394,9 +455,6 @@ While our disease identification system strives for accuracy and reliability, it
 
 
 
-
-
-
 # Third Tab
 with tab3:
     st.title("CDS Batch 6 - Group 2:")
@@ -428,92 +486,3 @@ with tab3:
 
     st.write("Shashank Srivastava")
     st.divider()
-
-
-
-
-
-def display_image(image):
-    fig = plt.figure(figsize=(12, 6))
-    plt.grid(False)
-    plt.imshow(image)
-    plt.show()
-
-
-def draw_bounding_box_on_image(image,
-                               ymin,
-                               xmin,
-                               ymax,
-                               xmax,
-                               color,
-                               font,
-                               thickness=4,
-                               display_str_list=()):
-    """Adds a bounding box to an image."""
-    draw = ImageDraw.Draw(image)
-    im_width, im_height = image.size
-    (left, right, top, bottom) = (xmin * im_width, xmax * im_width,
-                                  ymin * im_height, ymax * im_height)
-    draw.line([(left, top), (left, bottom), (right, bottom), (right, top),
-               (left, top)],
-              width=thickness,
-              fill=color)
-
-    # height of the display strings added to the top of the bounding
-    # box exceeds the top of the image - stack below:
-    display_str_heights = [font.getbbox(ds)[3] for ds in display_str_list]
-    total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)
-
-    if top > total_display_str_height:
-        text_bottom = top
-    else:
-        text_bottom = top + total_display_str_height
-    # Reverse list and print from bottom to top.
-    for display_str in display_str_list[::-1]:
-        bbox = font.getbbox(display_str)
-        text_width, text_height = bbox[2], bbox[3]
-        margin = np.ceil(0.05 * text_height)
-        draw.rectangle([(left, text_bottom - text_height - 2 * margin),
-                        (left + text_width, text_bottom)],
-                       fill=color)
-        draw.text((left + margin, text_bottom - text_height - margin),
-                  display_str,
-                  fill="black",
-                  font=font)
-        text_bottom -= text_height - 2 * margin
-
-
-
-def draw_boxes(image, boxes, class_names, scores, max_boxes=3, min_score=0.1):
-    """Overlay labeled boxes on an image with formatted scores and label names."""
-    colors = list(ImageColor.colormap.values())
-
-    try:
-        font = ImageFont.truetype("/usr/share/fonts/truetype/liberation/LiberationSansNarrow-Regular.ttf", 15)
-    except IOError:
-        print("Font not found, using default font.")
-        font = ImageFont.load_default()
-
-    # Prepare a list of all detections that meet the score threshold
-    filtered_boxes = [(boxes[i], scores[i], class_names[i]) for i in range(len(scores)) if scores[i] >= min_score]
-
-    # Sort detections based on scores in descending order
-    filtered_boxes.sort(key=lambda x: x[1], reverse=False)
-
-    # Process each box to draw (limited by max_boxes)
-    for i, (box, score, class_name) in enumerate(filtered_boxes[:max_boxes]):
-        ymin, xmin, ymax, xmax = tuple(box)
-        display_str = "{}: {:.2f}%".format(class_name.decode("ascii"), score * 100)
-        color = colors[hash(class_name) % len(colors)]
-        draw_bounding_box_on_image(
-            image,
-            ymin,
-            xmin,
-            ymax,
-            xmax,
-            color,
-            font,
-            display_str_list=[display_str])
-
-    # Convert PIL Image back to numpy array for display (if necessary)
-    return np.array(image) if isinstance(image, Image.Image) else image