SakshiRathi77 committed on
Commit
2ff1618
1 Parent(s): ed1c000

Update app.py

Files changed (1)
app.py +4 -186
app.py CHANGED
@@ -1,196 +1,21 @@
-# import gradio as gr
-# import spaces
-# from huggingface_hub import hf_hub_download
-# # Import YOLOv9
-# import yolov9
-
-# def download_models(model_id):
-#     hf_hub_download("SakshiRathi77/void-space-detection/weights", filename=f"{model_id}", local_dir=f"./")
-#     return f"./{model_id}"
-
-# def download_models(model_id):
-#     hf_hub_download("merve/yolov9", filename=f"{model_id}", local_dir=f"./")
-#     return f"./{model_id}"
-
-# def yolov9_inference(img_path, image_size, conf_threshold, iou_threshold):
-#     """
-#     Load a YOLOv9 model, configure it, perform inference on an image, and optionally adjust
-#     the input size and apply test time augmentation.
-
-#     :param model_path: Path to the YOLOv9 model file.
-#     :param conf_threshold: Confidence threshold for NMS.
-#     :param iou_threshold: IoU threshold for NMS.
-#     :param img_path: Path to the image file.
-#     :param size: Optional, input size for inference.
-#     :return: A tuple containing the detections (boxes, scores, categories) and the results object for further actions like displaying.
-#     """
-
-#     # Load the model
-#     model_path = download_models()
-#     # model = yolov9.load("./best.pt")
-
-#     # Set model parameters
-#     model.conf = conf_threshold
-#     model.iou = iou_threshold
-
-#     # Perform inference
-#     results = model(img_path, size=image_size)
-
-#     # Optionally, show detection bounding boxes on image
-#     output = results.render()
-
-#     return output[0]
-
-# def app():
-#     with gr.Blocks():
-#         with gr.Row():
-#             with gr.Column():
-#                 img_path = gr.Image(type="filepath", label="Image")
-#                 image_size = gr.Slider(
-#                     label="Image Size",
-#                     minimum=320,
-#                     maximum=1280,
-#                     step=32,
-#                     value=640,
-#                 )
-#                 conf_threshold = gr.Slider(
-#                     label="Confidence Threshold",
-#                     minimum=0.1,
-#                     maximum=1.0,
-#                     step=0.1,
-#                     value=0.4,
-#                 )
-#                 iou_threshold = gr.Slider(
-#                     label="IoU Threshold",
-#                     minimum=0.1,
-#                     maximum=1.0,
-#                     step=0.1,
-#                     value=0.5,
-#                 )
-#                 yolov9_infer = gr.Button(value="Inference")
-
-#             with gr.Column():
-#                 output_numpy = gr.Image(type="numpy", label="Output")
-
-#         yolov9_infer.click(
-#             fn=yolov9_inference,
-#             inputs=[
-#                 img_path,
-#                 # model_path,
-#                 image_size,
-#                 conf_threshold,
-#                 iou_threshold,
-#             ],
-#             outputs=[output_numpy],
-#         )
-
-# gradio_app = gr.Blocks()
-# with gradio_app:
-#     gr.HTML(
-#         """
-#         <h1 style='text-align: center'>
-#         YOLOv9: Learning What You Want to Learn Using Programmable Gradient Information
-#         </h1>
-#         """)
-#     gr.HTML(
-#         """
-#         <h3 style='text-align: center'>
-#         Follow me for more!
-#         </h3>
-#         """)
-#     with gr.Row():
-#         with gr.Column():
-#             app()
-
-# gradio_app.launch(debug=True)
-
-# make sure you have the following dependencies
-# import gradio as gr
-# import torch
-# from torchvision import transforms
-# from PIL import Image
-
-# # Load the YOLOv9 model
-# model_path = "best.pt"  # Replace with the path to your YOLOv9 model
-# model = torch.load(model_path)
-
-# # Define preprocessing transforms
-# preprocess = transforms.Compose([
-#     transforms.Resize((640, 640)),  # Resize image to model input size
-#     transforms.ToTensor(),          # Convert image to tensor
-# ])
-
-# # Define a function to perform inference
-# def detect_void(image):
-#     # Preprocess the input image
-#     image = Image.fromarray(image)
-#     image = preprocess(image).unsqueeze(0)  # Add batch dimension
-
-#     # Perform inference
-#     with torch.no_grad():
-#         output = model(image)
-
-#     # Post-process the output if needed
-#     # For example, draw bounding boxes on the image
-
-#     # Convert the image back to numpy array
-#     # and return the result
-#     return output.squeeze().numpy()
-
-# # Define Gradio interface components
-# input_image = gr.inputs.Image(shape=(640, 640), label="Input Image")
-# output_image = gr.outputs.Image(label="Output Image")
-
-# # Create Gradio interface
-# gr.Interface(fn=detect_void, inputs=input_image, outputs=output_image, title="Void Detection App").launch()
-
 
 import gradio as gr
 import spaces
 from huggingface_hub import hf_hub_download
-
-
-# def download_models(model_id):
-#     hf_hub_download("SakshiRathi77/void-space-detection/weights", filename=f"{model_id}", local_dir=f"./")
-#     return f"./{model_id}"
-
+import yolov9
 
 def yolov9_inference(img_path, model_id, image_size, conf_threshold, iou_threshold):
-    """
-    Load a YOLOv9 model, configure it, perform inference on an image, and optionally adjust
-    the input size and apply test time augmentation.
-
-    :param model_path: Path to the YOLOv9 model file.
-    :param conf_threshold: Confidence threshold for NMS.
-    :param iou_threshold: IoU threshold for NMS.
-    :param img_path: Path to the image file.
-    :param size: Optional, input size for inference.
-    :return: A tuple containing the detections (boxes, scores, categories) and the results object for further actions like displaying.
-    """
-    # Import YOLOv9
-    import yolov9
-
+
     # Load the model
     # model_path = download_models(model_id)
     model = yolov9.load(model_id)
-
     # Set model parameters
     model.conf = conf_threshold
     model.iou = iou_threshold
-
     # Perform inference
     results = model(img_path, size=image_size)
-
     # Optionally, show detection bounding boxes on image
     output = results.render()
-
     return output[0]
 
 
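For reference, the inference path this commit settles on can be exercised outside the Gradio UI. A minimal smoke-test sketch, assuming the yolov9 pip package is installed; ./best.pt and shelf.jpg are placeholder paths:

    import yolov9

    # Hypothetical smoke test; both paths are placeholders.
    model = yolov9.load("./best.pt")        # load the checkpoint
    model.conf = 0.4                        # NMS confidence threshold
    model.iou = 0.5                         # NMS IoU threshold
    results = model("shelf.jpg", size=640)  # run inference at 640 px
    annotated = results.render()[0]         # numpy image with boxes drawn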
@@ -202,7 +27,7 @@ def app():
                 model_path = gr.Dropdown(
                     label="Model",
                     choices=[
-                        "best.pt",
+                        "./best.pt",
                     ],
                     value="./best.pt",
                 )
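With this change the dropdown's only choice matches its default value exactly, so the preselected "./best.pt" is a valid option. If the checkpoint were instead fetched from the Hub at startup, as the removed download_models helper attempted, hf_hub_download could do it; a sketch, where the repo id and weights/ subfolder are assumptions about where best.pt actually lives:

    from huggingface_hub import hf_hub_download

    # Hypothetical fetch; repo_id and subfolder are assumptions.
    weights_path = hf_hub_download(
        repo_id="SakshiRathi77/void-space-detection",
        subfolder="weights",
        filename="best.pt",
        local_dir="./",
    )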
@@ -252,16 +77,9 @@ with gradio_app:
     gr.HTML(
         """
         <h1 style='text-align: center'>
-        YOLOv9: Learning What You Want to Learn Using Programmable Gradient Information
+        YOLOv9: Detect Void Space in Retail Shelf
         </h1>
         """)
-    gr.HTML(
-        """
-        <h3 style='text-align: center'>
-        Follow me for more!
-        <a href='https://twitter.com/kadirnar_ai' target='_blank'>Twitter</a> | <a href='https://github.com/kadirnar' target='_blank'>Github</a> | <a href='https://www.linkedin.com/in/kadir-nar/' target='_blank'>Linkedin</a> | <a href='https://www.huggingface.co/kadirnar/' target='_blank'>HuggingFace</a>
-        </h3>
-        """)
     with gr.Row():
         with gr.Column():
             app()
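One follow-up this commit leaves open: yolov9.load(model_id) still runs inside yolov9_inference, so the checkpoint is re-read on every button click. A possible refactor (not part of this commit) loads it once at import time; a sketch under the single-model assumption:

    import yolov9

    # Hypothetical refactor: load the checkpoint once, reuse per request.
    MODEL = yolov9.load("./best.pt")

    def yolov9_inference(img_path, model_id, image_size, conf_threshold, iou_threshold):
        MODEL.conf = conf_threshold
        MODEL.iou = iou_threshold
        results = MODEL(img_path, size=image_size)
        return results.render()[0]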