mw committed on
Commit
9c63647
1 Parent(s): 70b1451

Add examples and model

Browse files
Files changed (3) hide show
  1. app.py +11 -11
  2. lego-1.jpg +0 -0
  3. lego-2.jpg +0 -0
app.py CHANGED
@@ -17,26 +17,24 @@ from utils.general import check_img_size, check_requirements, check_imshow, non_
17
  from utils.plots import plot_one_box
18
  from utils.torch_utils import select_device, load_classifier, time_synchronized, TracedModel
19
  from PIL import Image
20
-
21
  from huggingface_hub import hf_hub_download
22
 
23
 
24
  def load_model(model_name):
25
- model_path = hf_hub_download(repo_id=f"Yolov7/{model_name}", filename=f"{model_name}.pt")
26
 
27
  return model_path
28
 
29
 
30
  model_names = [
31
- "yolov7",
32
- "yolov7-e6e",
33
- "yolov7-e6",
34
  ]
35
 
36
  models = {model_name: load_model(model_name) for model_name in model_names}
37
 
38
 
39
- def detect(img,model):
40
  parser = argparse.ArgumentParser()
41
  parser.add_argument('--weights', nargs='+', type=str, default=models[model], help='model.pt path(s)')
42
  parser.add_argument('--source', type=str, default='Inference/', help='source') # file/folder, 0 for webcam
@@ -159,7 +157,7 @@ def detect(img,model):
159
  plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3)
160
 
161
  # Print time (inference + NMS)
162
- #print(f'{s}Done. ({t2 - t1:.3f}s)')
163
 
164
  # Stream results
165
  if view_img:
@@ -187,11 +185,13 @@ def detect(img,model):
187
 
188
  if save_txt or save_img:
189
  s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
190
- #print(f"Results saved to {save_dir}{s}")
191
 
192
  print(f'Done. ({time.time() - t0:.3f}s)')
193
 
194
- return Image.fromarray(im0[:,:,::-1])
 
195
 
196
-
197
- gr.Interface(detect,[gr.Image(type="pil"),gr.Dropdown(choices=model_names)], gr.Image(type="pil"),title="Yolov7",examples=[["horses.jpeg", "yolov7"]],description="demo for <a href='https://github.com/WongKinYiu/yolov7' style='text-decoration: underline' target='_blank'>WongKinYiu/yolov7</a> Trainable bag-of-freebies sets new state-of-the-art for real-time object detectors").launch()
 
 
17
  from utils.plots import plot_one_box
18
  from utils.torch_utils import select_device, load_classifier, time_synchronized, TracedModel
19
  from PIL import Image
20
+
21
  from huggingface_hub import hf_hub_download
22
 
23
 
24
  def load_model(model_name):
25
+ model_path = hf_hub_download(repo_id="mw00/yolov7-lego", filename=f"{model_name}.pt")
26
 
27
  return model_path
28
 
29
 
30
  model_names = [
31
+ "best",
 
 
32
  ]
33
 
34
  models = {model_name: load_model(model_name) for model_name in model_names}
35
 
36
 
37
+ def detect(img, model):
38
  parser = argparse.ArgumentParser()
39
  parser.add_argument('--weights', nargs='+', type=str, default=models[model], help='model.pt path(s)')
40
  parser.add_argument('--source', type=str, default='Inference/', help='source') # file/folder, 0 for webcam
 
157
  plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3)
158
 
159
  # Print time (inference + NMS)
160
+ # print(f'{s}Done. ({t2 - t1:.3f}s)')
161
 
162
  # Stream results
163
  if view_img:
 
185
 
186
  if save_txt or save_img:
187
  s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
188
+ # print(f"Results saved to {save_dir}{s}")
189
 
190
  print(f'Done. ({time.time() - t0:.3f}s)')
191
 
192
+ return Image.fromarray(im0[:, :, ::-1])
193
+
194
 
195
+ gr.Interface(detect, [gr.Image(type="pil"), gr.Dropdown(choices=model_names)], gr.Image(type="pil"), title="Yolov7",
196
+ examples=[["lego-1.jpg", "yolov7", "lego-2.jpg", "yolov7"]],
197
+ description="demo for <a href='https://github.com/WongKinYiu/yolov7' style='text-decoration: underline' target='_blank'>WongKinYiu/yolov7</a> Trainable bag-of-freebies sets new state-of-the-art for real-time object detectors").launch()
lego-1.jpg ADDED
lego-2.jpg ADDED