umutkavakli commited on
Commit
e04b6d9
1 Parent(s): adb5729

initial files added

Browse files
app.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from detection import ObjectDetection
3
+
4
def get_predictions(img, threshold, box_color, text_color):
    """Run the image through all three YOLO detectors and draw their boxes.

    Args:
        img: input image (numpy array from the Gradio image widget).
        threshold: confidence cutoff; coerced to float before use.
        box_color: hex color string for the bounding boxes.
        text_color: hex color string for the class/confidence text.

    Returns:
        Tuple of three annotated frames (YOLOv3, YOLOv5, YOLOv8), in that order.
    """
    conf_threshold = float(threshold)

    # Score once per model; YOLOv8 results use the ultralytics result format
    # and therefore go through the dedicated v8 scoring method.
    scored = [
        (yolov3_detector, yolov3_detector.score_frame(img)),
        (yolov5_detector, yolov5_detector.score_frame(img)),
        (yolov8_detector, yolov8_detector.v8_score_frame(img)),
    ]

    # Each detector draws on its own copy of the input, so the three outputs
    # are independent.
    annotated = [
        detector.plot_bboxes(results, img, conf_threshold, box_color, text_color)
        for detector, results in scored
    ]
    return tuple(annotated)
15
+
16
+
17
# Gradio UI definition. Layout: input image plus color pickers / confidence
# slider / detect button on top, then one output pane per YOLO version so the
# three models can be compared side by side. The context-manager nesting below
# IS the layout — do not reorder these statements.
with gr.Blocks(title="Leaf Disease Detection", theme=gr.themes.Monochrome()) as interface:
    gr.Markdown("# Leaf Disease Detection")
    with gr.Row():
        with gr.Column():
            # Input image; 416x416 presumably matches the models' training
            # resolution — TODO confirm against the weight files.
            image = gr.Image(shape=(416,416), label="Input Image")
        with gr.Column():
            with gr.Row():
                with gr.Column():
                    # Outline color for detection boxes (default: blue).
                    box_color = gr.ColorPicker(label="Box Color", value="#0000ff")
                with gr.Column():
                    # Color for the class-name/confidence caption (default: red).
                    text_color = gr.ColorPicker(label="Prediction Color", value="#ff0000")

            # Minimum confidence a detection needs in order to be drawn.
            confidence = gr.Slider(maximum=1, step=0.01, value=0.4, label="Confidence Threshold", interactive=True)
            btn = gr.Button("Detect")

    # One output image per model version.
    with gr.Row():
        with gr.Box():
            v3_prediction = gr.Image(shape=(416,416), label="YOLOv3")
        with gr.Box():
            v5_prediction = gr.Image(shape=(416,416), label="YOLOv5")
        with gr.Box():
            v8_prediction = gr.Image(shape=(416,416), label="YOLOv8")

    # Wire the button: (image, confidence, colors) -> get_predictions -> the
    # three prediction panes. get_predictions resolves the detector globals at
    # click time, so they may be instantiated after this block.
    btn.click(
        get_predictions,
        [image, confidence, box_color, text_color],
        [v3_prediction, v5_prediction, v8_prediction]
    )
45
+
46
+
47
# Instantiate one detector per supported YOLO version. Each constructor loads
# its own fine-tuned weights, so this runs before the app starts serving.
yolov3_detector, yolov5_detector, yolov8_detector = (
    ObjectDetection(version) for version in ('yolov3', 'yolov5', 'yolov8')
)

# Start the Gradio app (blocking call).
interface.launch()
52
+
detection.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import cv2
3
+ from PIL import ImageColor
4
+ from ultralytics import YOLO
5
+
6
+
7
class ObjectDetection:
    """Uniform wrapper around YOLOv3/v5 (torch.hub) and YOLOv8 (ultralytics).

    Attributes:
        model_name: one of 'yolov3', 'yolov5', 'yolov8'; selects the weight
            file and the loading path.
        model: the loaded detector object.
        classes: mapping from integer class index to label name.
        device: inference device label; set to 'cpu' but not otherwise used
            by this class.
    """

    def __init__(self, model_name='yolov3'):
        self.model_name = model_name
        self.model = self.load_model()
        # Class-index -> label mapping exposed by both hub and ultralytics models.
        self.classes = self.model.names
        self.device = 'cpu'

    def load_model(self):
        """Load fine-tuned weights for ``self.model_name`` from ./weights.

        YOLOv3/v5 go through torch.hub's 'custom' entry point; YOLOv8 uses the
        ultralytics YOLO loader.
        """
        if self.model_name != 'yolov8':
            # NOTE(review): force_reload=True re-fetches the hub repo on every
            # instantiation, which slows startup; consider removing it once the
            # local hub cache is known-good.
            model = torch.hub.load(f"ultralytics/{self.model_name}", 'custom', path=f"weights/{self.model_name}_best.pt", force_reload=True)
        else:
            model = YOLO(f"weights/{self.model_name}_best.pt")

        return model

    def score_frame(self, frame):
        """Run v3/v5 inference on ``frame``.

        Returns:
            (labels, confidences, coords) tensors; coords are normalized
            xyxy in [0, 1] (see get_coords for the pixel conversion).
        """
        results = self.model(frame)

        # results.xyxyn[0] columns: x1, y1, x2, y2, confidence, class.
        detections = results.xyxyn[0]
        labels, conf = detections[:, -1], detections[:, -2]
        # BUGFIX: was detections[:, :-1], which kept the confidence column
        # inside the coordinate tensor. get_coords() only reads row[0..3] so it
        # worked by accident; slice exactly the four coordinate columns.
        coord = detections[:, :4]
        return labels, conf, coord

    def v8_score_frame(self, frame):
        """Run v8 inference on ``frame``.

        Returns:
            (labels, confidences, coords) as Python lists; coords are
            absolute-pixel xyxy boxes (unlike score_frame's normalized ones).
        """
        results = self.model(frame)

        labels = []
        confidences = []
        coords = []

        for result in results:
            # Move boxes to CPU numpy before extending the accumulators.
            boxes = result.boxes.cpu().numpy()

            labels.extend(boxes.cls)
            confidences.extend(boxes.conf)
            coords.extend(boxes.xyxy)

        return labels, confidences, coords

    def get_coords(self, frame, row):
        """Convert one detection row to integer pixel corners (x1, y1, x2, y2).

        v3/v5 rows are normalized to [0, 1] and are scaled by the frame size;
        v8 rows are already in pixels and are only truncated to int.
        """
        if self.model_name != 'yolov8':
            x_shape, y_shape = frame.shape[1], frame.shape[0]
            return int(row[0] * x_shape), int(row[1] * y_shape), int(row[2] * x_shape), int(row[3] * y_shape)
        else:
            return int(row[0]), int(row[1]), int(row[2]), int(row[3])

    def class_to_label(self, x):
        """Return the label name for class index ``x``."""
        return self.classes[int(x)]

    def get_color(self, code):
        """Convert a hex color string (e.g. '#0000ff') to an (R, G, B) tuple.

        NOTE(review): ImageColor returns RGB while cv2 drawing functions
        interpret tuples as BGR, so non-symmetric colors render
        channel-swapped — confirm whether this is intended.
        """
        rgb = ImageColor.getcolor(code, "RGB")
        return rgb

    def plot_bboxes(self, results, frame, threshold, box_color, text_color):
        """Draw boxes and captions for detections with confidence >= threshold.

        Args:
            results: (labels, confidences, coords) from score_frame or
                v8_score_frame.
            frame: input image array; not modified — drawing happens on a copy.
            threshold: minimum confidence for a detection to be drawn.
            box_color: hex color string for box outlines.
            text_color: hex color string for caption text.

        Returns:
            Annotated copy of ``frame``.
        """
        labels, conf, coord = results
        n = len(labels)

        # Copy so the caller's frame (shared across the three models) is untouched.
        frame = frame.copy()
        box_color = self.get_color(box_color)
        text_color = self.get_color(text_color)

        for i in range(n):
            row = coord[i]
            if conf[i] >= threshold:
                x1, y1, x2, y2 = self.get_coords(frame, row)
                class_name = self.class_to_label(labels[i])

                cv2.rectangle(frame, (x1, y1), (x2, y2), box_color, 2)
                # Caption is anchored at the box's top-left corner.
                cv2.putText(frame, f"{class_name} - {conf[i]*100:.2f}%", (x1, y1), cv2.FONT_HERSHEY_COMPLEX, 0.5, text_color)

        return frame
82
+
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ torch
2
+ opencv-python
3
+ Pillow
4
+ ultralytics
5
+ gradio
weights/yolov3_best.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:de10bcf127d161a673b004e0972c36af506299e14d261b9a0fcdb7ae9fbe77b0
3
+ size 93039765
weights/yolov5_best.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:77d41071f2ece081a264deac85cd7c6526c814627ef2e6c34137242947c3a33e
3
+ size 93039637
weights/yolov8_best.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:daed75e7e8751a6a0a5e9aabc77556fa7367de4d4e2511448648232aeb43842e
3
+ size 87651208