flinta committed on
Commit
27c9706
1 Parent(s): 0294293

Upload 3 files

Browse files
Files changed (3) hide show
  1. app.py +77 -0
  2. best.pt +3 -0
  3. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
import torch
from ultralyticsplus import YOLO, render_result


# Example images for the Gradio demo: (source URL, local filename) pairs.
_EXAMPLE_IMAGES = (
    ('https://external-content.duckduckgo.com/iu/?u=https%3A%2F%2Ftexashafts.com%2Fwp-content%2Fuploads%2F2016%2F04%2Fconstruction-worker.jpg', 'one.jpg'),
    ('https://www.pearsonkoutcherlaw.com/wp-content/uploads/2020/06/Construction-Workers.jpg', 'two.jpg'),
    ('https://nssgroup.com/wp-content/uploads/2019/02/Building-maintenance-blog.jpg', 'three.jpg'),
)

# Download each sample image so the `examples` gallery below can use it.
for _url, _dest in _EXAMPLE_IMAGES:
    torch.hub.download_url_to_file(_url, _dest)
12
+
13
+
14
def yoloV8_func(image=None,
                image_size: int = 640,
                conf_threshold: float = 0.4,
                iou_threshold: float = 0.50):
    """Run YOLOv8 object detection on *image* and return the rendered result.

    Args:
        image: Path to the input image (Gradio passes a filepath string).
            Defaults to None.
        image_size: Inference image size passed to the model. Defaults to 640.
        conf_threshold: Minimum confidence for a detection to be kept.
            Defaults to 0.4.
        iou_threshold: IoU threshold used for non-max suppression.
            Defaults to 0.50.

    Returns:
        The input image with bounding boxes rendered around each detection.
    """
    # Load the fine-tuned 'best.pt' checkpoint once and cache it on the
    # function object, so repeated Gradio calls don't re-read the model
    # weights from disk on every inference.
    model = getattr(yoloV8_func, "_model", None)
    if model is None:
        model = YOLO("best.pt")
        yoloV8_func._model = model

    # Perform object detection with the user-selected thresholds.
    results = model.predict(image,
                            conf=conf_threshold,
                            iou=iou_threshold,
                            imgsz=image_size)

    # Log the detections (class ids, xyxy coordinates, confidences) to stdout.
    box = results[0].boxes
    print("Object type:", box.cls)
    print("Coordinates:", box.xyxy)
    print("Probability:", box.conf)

    # Draw the predicted bounding boxes on the image and return it for display.
    render = render_result(model=model, image=image, result=results[0])
    return render
45
+
46
+
47
# Gradio input widgets; their order must match yoloV8_func's parameters.
# Uses the Gradio 3.x component API (gr.Image/gr.Slider with `value=`)
# instead of the deprecated gr.inputs.*/gr.outputs.* namespaces.
inputs = [
    gr.Image(type="filepath", label="Input Image"),
    gr.Slider(minimum=320, maximum=1280, value=640,
              step=32, label="Image Size"),
    # Default 0.4 matches yoloV8_func's conf_threshold default (the old
    # 0.001 default effectively disabled confidence filtering).
    gr.Slider(minimum=0.0, maximum=1.0, value=0.4,
              step=0.05, label="Confidence Threshold"),
    gr.Slider(minimum=0.0, maximum=1.0, value=0.5,
              step=0.05, label="IOU Threshold"),
]


outputs = gr.Image(type="filepath", label="Output Image")

title = "YOLOv8 101: Custom Object Detection on Construction Workers"


# Example rows: [image path, image size, confidence, IoU threshold].
examples = [['one.jpg', 640, 0.5, 0.7],
            ['two.jpg', 800, 0.5, 0.6],
            ['three.jpg', 900, 0.5, 0.8]]

yolo_app = gr.Interface(
    fn=yoloV8_func,
    inputs=inputs,
    outputs=outputs,
    title=title,
    examples=examples,
    cache_examples=True,
)

# queue() replaces the deprecated launch(enable_queue=True) in Gradio 3.x;
# launch in debug mode so tracebacks surface in the console.
yolo_app.queue().launch(debug=True)
best.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a981994dae3173e97639606e5045759ef84333492a82a1f3516840e5b0c017ba
3
+ size 6336985
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ gradio==3.39.0
2
+ torch
3
+ ultralyticsplus==0.0.28