DanMa16 committed
Commit 3df9eb6
1 Parent(s): c8f456a

Upload 3 files

Files changed (3)
  1. app.py +65 -0
  2. best.pt +3 -0
  3. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,65 @@
+ import gradio as gr
+ import torch  # pinned in requirements.txt; YOLO inference runs on top of it
+ from ultralyticsplus import YOLO, render_result
+
+
+ def yoloV8_func(image=None,
+                 image_size=640,
+                 conf_threshold=0.4,
+                 iou_threshold=0.50):
+     """Perform YOLOv8 object detection on the given image.
+
+     Args:
+         image (str, optional): Path to the input image. Defaults to None.
+         image_size (int, optional): Inference image size for the model. Defaults to 640.
+         conf_threshold (float, optional): Confidence threshold for detections. Defaults to 0.4.
+         iou_threshold (float, optional): Intersection-over-Union threshold for NMS. Defaults to 0.50.
+     """
+     # Load the YOLOv8 model from the 'best.pt' checkpoint
+     model_path = "best.pt"
+     model = YOLO(model_path)
+
+     # Perform object detection on the input image using the YOLOv8 model
+     results = model.predict(image,
+                             conf=conf_threshold,
+                             iou=iou_threshold,
+                             imgsz=image_size)
+
+     # Print the detected objects' information (class, coordinates, and confidence)
+     box = results[0].boxes
+     print("Object type:", box.cls)
+     print("Coordinates:", box.xyxy)
+     print("Probability:", box.conf)
+
+     # Render the output image with bounding boxes around detected objects
+     render = render_result(model=model, image=image, result=results[0])
+     return render
+
+
+ inputs = [
+     gr.Image(type="filepath", label="Input Image"),
+     gr.Slider(minimum=320, maximum=1280, value=640,
+               step=32, label="Image Size"),
+     gr.Slider(minimum=0.0, maximum=1.0, value=0.25,
+               step=0.05, label="Confidence Threshold"),
+     gr.Slider(minimum=0.0, maximum=1.0, value=0.45,
+               step=0.05, label="IOU Threshold"),
+ ]
+
+ # render_result returns a PIL image, so the output component accepts PIL directly
+ outputs = gr.Image(type="pil", label="Output Image")
+
+ title = "Water Meter"
+
+ yolo_app = gr.Interface(
+     fn=yoloV8_func,
+     inputs=inputs,
+     outputs=outputs,
+     title=title,
+ )
+
+ # Launch the Gradio interface in debug mode with the request queue enabled
+ yolo_app.queue().launch(debug=True)
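For a quick check outside the Gradio UI, the same ultralyticsplus calls used in app.py can be run directly. A minimal sketch, assuming best.pt is in the working directory and sample.jpg is any local test image (both paths are placeholders):

from ultralyticsplus import YOLO, render_result

# Load the checkpoint shipped with this commit (placeholder path)
model = YOLO("best.pt")

# Run detection with the same values the app's sliders start from
results = model.predict("sample.jpg", conf=0.25, iou=0.45, imgsz=640)

# Draw the boxes and save the rendered image; render_result returns a PIL image
rendered = render_result(model=model, image="sample.jpg", result=results[0])
rendered.save("sample_annotated.jpg")
print("classes:", results[0].boxes.cls, "scores:", results[0].boxes.conf)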
best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1ea51ba9655508bab9a2e82da23fdbbe3bcbb5d2f792e73860386e7bdcdf430e
+ size 89573453
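best.pt is committed as a Git LFS pointer, so only the oid and size above live in the repository; the 89,573,453-byte checkpoint itself is downloaded when the repo is cloned with LFS support. A small sketch to confirm a local copy matches the pointer (assuming best.pt has already been fetched):

import hashlib
import os

# Expected values copied from the LFS pointer above
EXPECTED_OID = "1ea51ba9655508bab9a2e82da23fdbbe3bcbb5d2f792e73860386e7bdcdf430e"
EXPECTED_SIZE = 89573453

# Stream the file through SHA-256 instead of reading ~85 MB at once
sha256 = hashlib.sha256()
with open("best.pt", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)

print("size ok:", os.path.getsize("best.pt") == EXPECTED_SIZE)
print("oid ok: ", sha256.hexdigest() == EXPECTED_OID)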
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ gradio==3.50.2
+ torch==2.1.0+cu121
+ ultralyticsplus==0.0.28
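Note that torch==2.1.0+cu121 is a CUDA 12.1 build; wheels carrying the +cu121 local version tag are normally served from the PyTorch download index rather than PyPI, so the environment installing these pins may need that index configured. A minimal sketch (using importlib.metadata, Python 3.8+) to confirm the running environment matches the pins:

from importlib.metadata import version

# Compare installed package versions against the pins in requirements.txt
for pkg, pinned in [("gradio", "3.50.2"),
                    ("torch", "2.1.0+cu121"),
                    ("ultralyticsplus", "0.0.28")]:
    installed = version(pkg)
    print(f"{pkg}: installed {installed}, pinned {pinned}, match: {installed == pinned}")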