Fixed it for you. Say thank you.

#1
Files changed (1)
  1. app.py +106 -169
app.py CHANGED
@@ -1,181 +1,118 @@
- # import gradio as gr
- # import spaces
- # from huggingface_hub import hf_hub_download
-
-
- # def download_models(model_id):
- #     hf_hub_download("SakshiRathi77/void-space-detection", filename=f"{model_id}", local_dir=f"./")
- #     return f"./{model_id}"
-
- # @spaces.GPU
- # def yolov9_inference(img_path, model_id, image_size, conf_threshold, iou_threshold):
- #     """
- #     Load a YOLOv9 model, configure it, perform inference on an image, and optionally adjust
- #     the input size and apply test time augmentation.
-
- #     :param model_path: Path to the YOLOv9 model file.
- #     :param conf_threshold: Confidence threshold for NMS.
- #     :param iou_threshold: IoU threshold for NMS.
- #     :param img_path: Path to the image file.
- #     :param size: Optional, input size for inference.
- #     :return: A tuple containing the detections (boxes, scores, categories) and the results object for further actions like displaying.
- #     """
- #     # Import YOLOv9
- #     import yolov9
-
- #     # Load the model
- #     model_path = download_models(model_id)
- #     model = yolov9.load(model_path, device="cuda:0")
-
- #     # Set model parameters
- #     model.conf = conf_threshold
- #     model.iou = iou_threshold
-
- #     # Perform inference
- #     results = model(img_path, size=image_size)
-
- #     # Optionally, show detection bounding boxes on image
- #     output = results.render()
-
- #     return output[0]
-
-
- # def app():
- #     with gr.Blocks():
- #         with gr.Row():
- #             with gr.Column():
- #                 img_path = gr.Image(type="filepath", label="Image")
- #                 model_path = gr.Dropdown(
- #                     label="Model",
- #                     choices=[
- #                         "state_dict.pt"
- #                     ],
- #                     value="state_dict.pt",
- #                 )
- #                 image_size = gr.Slider(
- #                     label="Image Size",
- #                     minimum=320,
- #                     maximum=1280,
- #                     step=32,
- #                     value=640,
- #                 )
- #                 conf_threshold = gr.Slider(
- #                     label="Confidence Threshold",
- #                     minimum=0.1,
- #                     maximum=1.0,
- #                     step=0.1,
- #                     value=0.4,
- #                 )
- #                 iou_threshold = gr.Slider(
- #                     label="IoU Threshold",
- #                     minimum=0.1,
- #                     maximum=1.0,
- #                     step=0.1,
- #                     value=0.5,
- #                 )
- #                 yolov9_infer = gr.Button(value="Inference")
-
- #             with gr.Column():
- #                 output_numpy = gr.Image(type="numpy",label="Output")
-
- #         yolov9_infer.click(
- #             fn=yolov9_inference,
- #             inputs=[
- #                 img_path,
- #                 model_path,
- #                 image_size,
- #                 conf_threshold,
- #                 iou_threshold,
- #             ],
- #             outputs=[output_numpy],
- #         )
-
- #         # gr.Examples(
- #         #     examples=[
- #         #         [
- #         #             "data/zidane.jpg",
- #         #             "gelan-e.pt",
- #         #             640,
- #         #             0.4,
- #         #             0.5,
- #         #         ],
- #         #         [
- #         #             "data/huggingface.jpg",
- #         #             "yolov9-c.pt",
- #         #             640,
- #         #             0.4,
- #         #             0.5,
- #         #         ],
- #         #     ],
- #         #     fn=yolov9_inference,
- #         #     inputs=[
- #         #         img_path,
- #         #         model_path,
- #         #         image_size,
- #         #         conf_threshold,
- #         #         iou_threshold,
- #         #     ],
- #         #     outputs=[output_numpy],
- #         #     cache_examples=True,
- #         # )
-
-
- # gradio_app = gr.Blocks()
- # with gradio_app:
- #     gr.HTML(
- #         """
- #         <h1 style='text-align: center'>
- #         YOLOv9: Learning What You Want to Learn Using Programmable Gradient Information
- #         </h1>
- #         """)
- #     gr.HTML(
- #         """
- #         <h3 style='text-align: center'>
- #         Follow me for more!
- #         </h3>
- #         """)
- #     with gr.Row():
- #         with gr.Column():
- #             app()
-
- # gradio_app.launch(debug=True)
-
- # make sure you have the following dependencies
- import gradio as gr
- import torch
- from torchvision import transforms
- from PIL import Image
-
- # Load the YOLOv9 model
- model_path = "best.pt" # Replace with the path to your YOLOv9 model
- model = torch.load(model_path)
-
- # Define preprocessing transforms
- preprocess = transforms.Compose([
-     transforms.Resize((640, 640)), # Resize image to model input size
-     transforms.ToTensor(), # Convert image to tensor
- ])
-
- # Define a function to perform inference
- def detect_void(image):
-     # Preprocess the input image
-     image = Image.fromarray(image)
-     image = preprocess(image).unsqueeze(0) # Add batch dimension
-
-     # Perform inference
-     with torch.no_grad():
-         output = model(image)
-
-     # Post-process the output if needed
-     # For example, draw bounding boxes on the image
-
-     # Convert the image back to numpy array
-     # and return the result
-     return output.squeeze().numpy()
-
- # Define Gradio interface components
- input_image = gr.inputs.Image(shape=(640, 640), label="Input Image")
- output_image = gr.outputs.Image(label="Output Image")
-
- # Create Gradio interface
- gr.Interface(fn=detect_void, inputs=input_image, outputs=output_image, title="Void Detection App").launch()
+ import gradio as gr
+ import spaces
+ from huggingface_hub import hf_hub_download
+
+
+ def download_models(model_id):
+     hf_hub_download("SakshiRathi77/void-space-detection", filename=f"{model_id}", local_dir=f"./")
+     return f"./{model_id}"
+
+
+ def yolov9_inference(img_path, model_id, image_size, conf_threshold, iou_threshold):
+     """
+     Load a YOLOv9 model, configure it, perform inference on an image, and optionally adjust
+     the input size and apply test time augmentation.
+
+     :param model_path: Path to the YOLOv9 model file.
+     :param conf_threshold: Confidence threshold for NMS.
+     :param iou_threshold: IoU threshold for NMS.
+     :param img_path: Path to the image file.
+     :param size: Optional, input size for inference.
+     :return: A tuple containing the detections (boxes, scores, categories) and the results object for further actions like displaying.
+     """
+     # Import YOLOv9
+     import yolov9
+
+     # Load the model
+     model_path = download_models("best.pt")
+     model = yolov9.load(model_path, device="cpu")
+
+     # Set model parameters
+     model.conf = conf_threshold
+     model.iou = iou_threshold
+
+     # Perform inference
+     results = model(img_path, size=image_size)
+
+     # Optionally, show detection bounding boxes on image
+     output = results.render()
+
+     return output[0]
+
+
+ def app():
+     with gr.Blocks():
+         with gr.Row():
+             with gr.Column():
+                 img_path = gr.Image(type="filepath", label="Image")
+
+                 image_size = gr.Slider(
+                     label="Image Size",
+                     minimum=320,
+                     maximum=1280,
+                     step=32,
+                     value=640,
+                 )
+                 conf_threshold = gr.Slider(
+                     label="Confidence Threshold",
+                     minimum=0.1,
+                     maximum=1.0,
+                     step=0.1,
+                     value=0.4,
+                 )
+                 iou_threshold = gr.Slider(
+                     label="IoU Threshold",
+                     minimum=0.1,
+                     maximum=1.0,
+                     step=0.1,
+                     value=0.5,
+                 )
+                 yolov9_infer = gr.Button(value="Inference")
+
+             with gr.Column():
+                 output_numpy = gr.Image(type="numpy",label="Output")
+
+         yolov9_infer.click(
+             fn=yolov9_inference,
+             inputs=[
+                 img_path,
+                 image_size,
+                 conf_threshold,
+                 iou_threshold,
+             ],
+             outputs=[output_numpy],
+         )

+             fn=yolov9_inference,
+             inputs=[
+                 img_path,
+                 model_path,
+                 image_size,
+                 conf_threshold,
+                 iou_threshold,
+             ],
+             outputs=[output_numpy],
+         )
+
+
+ gradio_app = gr.Blocks()
+ with gradio_app:
+     gr.HTML(
+         """
+         <h1 style='text-align: center'>
+         YOLOv9: Learning What You Want to Learn Using Programmable Gradient Information
+         </h1>
+         """)
+     gr.HTML(
+         """
+         <h3 style='text-align: center'>
+         Follow me for more!
+         </h3>
+         """)
+     with gr.Row():
+         with gr.Column():
+             app()
+
+ gradio_app.launch(debug=True)
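
For context, a minimal sketch of how the inference path added above could be exercised outside the Gradio UI. It only mirrors calls that already appear in the diff (hf_hub_download, yolov9.load, model.conf/model.iou, results.render()); the local test image "test.jpg" and the threshold values are assumptions for illustration, not part of the change:

# Illustrative sketch, not part of this PR: exercises the same calls the new app.py makes.
from huggingface_hub import hf_hub_download
import yolov9  # the same community YOLOv9 package the Space imports

# Fetch the checkpoint the Space downloads at runtime; hf_hub_download returns the local file path
model_path = hf_hub_download("SakshiRathi77/void-space-detection", filename="best.pt", local_dir="./")

model = yolov9.load(model_path, device="cpu")  # CPU inference, matching the updated app.py
model.conf = 0.4  # confidence threshold for NMS (the slider's default)
model.iou = 0.5   # IoU threshold for NMS (the slider's default)

results = model("test.jpg", size=640)  # "test.jpg" is a hypothetical local image
rendered = results.render()            # annotated frames as numpy arrays
print(rendered[0].shape)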