Zengyf-CVer commited on
Commit
1f1d55a
1 Parent(s): b6b17d2

v04 update

Browse files
Files changed (2) hide show
  1. README.md +1 -1
  2. app.py +12 -4
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🚀
4
  colorFrom: red
5
  colorTo: red
6
  sdk: gradio
7
- sdk_version: 3.0.6
8
  app_file: app.py
9
  pinned: false
10
  license: gpl-3.0
 
4
  colorFrom: red
5
  colorTo: red
6
  sdk: gradio
7
+ sdk_version: 3.0.9
8
  app_file: app.py
9
  pinned: false
10
  license: gpl-3.0
app.py CHANGED
@@ -227,6 +227,12 @@ def yolo_det_img(img, device, model_name, infer_size, conf, iou, max_num, model_
227
  img_size = img.size # frame size
228
 
229
  results = model(img, size=infer_size) # detection
 
 
 
 
 
 
230
 
231
  # Data Frame
232
  dataframe = results.pandas().xyxy[0].round(2)
@@ -320,7 +326,7 @@ def yolo_det_img(img, device, model_name, infer_size, conf, iou, max_num, model_
320
  for k, v in clsDet_dict.items():
321
  clsRatio_dict[k] = v / clsDet_dict_sum
322
 
323
- return det_img, objSize_dict, clsRatio_dict, det_json, report, dataframe
324
 
325
 
326
  # YOLOv5 video detection function
@@ -517,12 +523,13 @@ def main(args):
517
  label="List of detection information")
518
  outputs_objSize = gr.Label(label="Object size ratio statistics")
519
  outputs_clsSize = gr.Label(label="Category detection proportion statistics")
 
520
 
521
  # -------------------output component-------------------
522
  outputs_video = gr.Video(format='mp4', label="Detection video")
523
 
524
  # output parameters
525
- outputs_img_list = [outputs_img, outputs_objSize, outputs_clsSize, outputs_json, outputs_pdf, outputs_df]
526
  outputs_video_list = [outputs_video]
527
 
528
  # title
@@ -559,7 +566,7 @@ def main(args):
559
  "cpu",
560
  "yolov5m",
561
  640,
562
- 0.25,
563
  0.5,
564
  15,
565
  ["person", "tie"],
@@ -592,13 +599,14 @@ def main(args):
592
  )
593
 
594
  gyd_video = gr.Interface(
 
595
  fn=yolo_det_video,
596
  inputs=inputs_video_list,
597
  outputs=outputs_video_list,
598
  title=title,
599
  description=description,
600
  # article=article,
601
- # examples=examples,
602
  # theme="seafoam",
603
  # live=True, # Change output in real time
604
  flagging_dir="run", # output directory
 
227
  img_size = img.size # frame size
228
 
229
  results = model(img, size=infer_size) # detection
230
+
231
+ # ---------------- Object cropping ----------------
232
+ crops = results.crop(save=False)
233
+ img_crops = []
234
+ for i in range(len(crops)):
235
+ img_crops.append(crops[i]["im"][..., ::-1])
236
 
237
  # Data Frame
238
  dataframe = results.pandas().xyxy[0].round(2)
 
326
  for k, v in clsDet_dict.items():
327
  clsRatio_dict[k] = v / clsDet_dict_sum
328
 
329
+ return det_img, img_crops, objSize_dict, clsRatio_dict, dataframe, det_json, report
330
 
331
 
332
  # YOLOv5 video detection function
 
523
  label="List of detection information")
524
  outputs_objSize = gr.Label(label="Object size ratio statistics")
525
  outputs_clsSize = gr.Label(label="Category detection proportion statistics")
526
+ outputs_crops = gr.Gallery(label="Object crop")
527
 
528
  # -------------------output component-------------------
529
  outputs_video = gr.Video(format='mp4', label="Detection video")
530
 
531
  # output parameters
532
+ outputs_img_list = [outputs_img, outputs_crops, outputs_objSize, outputs_clsSize, outputs_df, outputs_json, outputs_pdf]
533
  outputs_video_list = [outputs_video]
534
 
535
  # title
 
566
  "cpu",
567
  "yolov5m",
568
  640,
569
+ 0.6,
570
  0.5,
571
  15,
572
  ["person", "tie"],
 
599
  )
600
 
601
  gyd_video = gr.Interface(
602
+ # fn=yolo_det_video_test,
603
  fn=yolo_det_video,
604
  inputs=inputs_video_list,
605
  outputs=outputs_video_list,
606
  title=title,
607
  description=description,
608
  # article=article,
609
+ examples=examples,
610
  # theme="seafoam",
611
  # live=True, # Change output in real time
612
  flagging_dir="run", # output directory