Zengyf-CVer committed on
Commit e2e4946
1 Parent(s): 4c5cbfb

v04 update

Files changed (2)
  1. app.py +22 -7
  2. requirements.txt +1 -0
app.py CHANGED
@@ -243,6 +243,19 @@ def yolo_det_img(img, device, model_name, infer_size, conf, iou, max_num, model_
 
     # Data Frame
     dataframe = results.pandas().xyxy[0].round(2)
+
+    det_csv = "./Det_Report.csv"
+    det_excel = "./Det_Report.xlsx"
+
+    if "csv" in opt:
+        dataframe.to_csv(det_csv, index=False)
+    else:
+        det_csv = None
+
+    if "excel" in opt:
+        dataframe.to_excel(det_excel, sheet_name='sheet1', index=False)
+    else:
+        det_excel = None
 
     # ----------------Load fonts----------------
     yaml_index = cls_name.index(".yaml")
@@ -333,7 +346,7 @@ def yolo_det_img(img, device, model_name, infer_size, conf, iou, max_num, model_
     for k, v in clsDet_dict.items():
         clsRatio_dict[k] = v / clsDet_dict_sum
 
-    return det_img, img_crops, objSize_dict, clsRatio_dict, dataframe, det_json, report
+    return det_img, img_crops, objSize_dict, clsRatio_dict, dataframe, det_json, report, det_csv, det_excel
 
 
 # YOLOv5 video detection function
@@ -479,7 +492,7 @@ def main(args):
     inputs_iou01 = gr.Slider(0, 1, step=slider_step, value=nms_iou, label="IoU threshold")
     inputs_maxnum01 = gr.Number(value=max_detnum, label="Maximum number of detections")
     inputs_clsName01 = gr.CheckboxGroup(choices=model_cls_name, value=model_cls_name, type="index", label="category")
-    inputs_opt01 = gr.CheckboxGroup(choices=["label", "pdf", "json", "refresh_yolov5"],
+    inputs_opt01 = gr.CheckboxGroup(choices=["refresh_yolov5", "label", "pdf", "json", "csv", "excel"],
                                     value=["label", "pdf"],
                                     type="value",
                                     label="operate")
@@ -493,7 +506,7 @@ def main(args):
     inputs_iou02 = gr.Slider(0, 1, step=slider_step, value=nms_iou, label="IoU threshold")
     inputs_maxnum02 = gr.Number(value=max_detnum, label="Maximum number of detections")
     inputs_clsName02 = gr.CheckboxGroup(choices=model_cls_name, value=model_cls_name, type="index", label="category")
-    inputs_opt02 = gr.CheckboxGroup(choices=["label", "refresh_yolov5"], value=["label"], type="value", label="operate")
+    inputs_opt02 = gr.CheckboxGroup(choices=["refresh_yolov5", "label"], value=["label"], type="value", label="operate")
 
     # Input parameters
     inputs_img_list = [
@@ -522,21 +535,23 @@ def main(args):
 
     # -------------------output component-------------------
     outputs_img = gr.Image(type="pil", label="Detection image")
-    outputs_json = gr.JSON(label="Detection information")
-    outputs_pdf = gr.File(label="Download test report")
+    outputs_crops = gr.Gallery(label="Object crop")
     outputs_df = gr.Dataframe(max_rows=5,
                               overflow_row_behaviour="paginate",
                               type="pandas",
                               label="List of detection information")
     outputs_objSize = gr.Label(label="Object size ratio statistics")
     outputs_clsSize = gr.Label(label="Category detection proportion statistics")
-    outputs_crops = gr.Gallery(label="Object crop")
+    outputs_json = gr.JSON(label="Detection information")
+    outputs_pdf = gr.File(label="pdf detection report")
+    outputs_csv = gr.File(label="csv detection report")
+    outputs_excel = gr.File(label="xlsx detection report")
 
     # -------------------output component-------------------
     outputs_video = gr.Video(format='mp4', label="Detection video")
 
     # output parameters
-    outputs_img_list = [outputs_img, outputs_crops, outputs_objSize, outputs_clsSize, outputs_df, outputs_json, outputs_pdf]
+    outputs_img_list = [outputs_img, outputs_crops, outputs_objSize, outputs_clsSize, outputs_df, outputs_json, outputs_pdf, outputs_csv, outputs_excel]
     outputs_video_list = [outputs_video]
 
     # title
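
For context on how the new "csv"/"excel" options are expected to plug together, here is a minimal sketch: the two paths returned by yolo_det_img land on the two new gr.File outputs, and a None path simply leaves the component empty. The names opt and dataframe and the report file names come from the diff; the standalone function and sample data are illustrative only.

    # Illustrative sketch only -- mirrors the export branch added in this commit.
    # `opt` is the list of strings ticked in the "operate" CheckboxGroup and
    # `dataframe` is the pandas table built from results.pandas().xyxy[0].
    import pandas as pd

    def export_reports(dataframe: pd.DataFrame, opt: list):
        det_csv = "./Det_Report.csv"
        det_excel = "./Det_Report.xlsx"

        if "csv" in opt:
            dataframe.to_csv(det_csv, index=False)
        else:
            det_csv = None  # a None path leaves the gr.File output empty

        if "excel" in opt:
            # needs an xlsx writer engine; this commit adds openpyxl to requirements.txt
            dataframe.to_excel(det_excel, sheet_name="sheet1", index=False)
        else:
            det_excel = None

        return det_csv, det_excel

    # Example: only the CSV report is requested
    csv_path, xlsx_path = export_reports(
        pd.DataFrame({"name": ["person"], "confidence": [0.92]}),
        opt=["label", "csv"])
    print(csv_path, xlsx_path)  # ./Det_Report.csv None
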
requirements.txt CHANGED
@@ -17,6 +17,7 @@ rich>=12.2.0
 fpdf>=1.7.2
 plotly>=5.7.0
 bokeh>=2.4.2
+openpyxl>=3.0.10
 
 # Logging -------------------------------------
 tensorboard>=2.4.1
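
openpyxl is added because pandas delegates .xlsx writing to it; without an xlsx engine installed, the new dataframe.to_excel(...) call would typically fail with an ImportError. A quick, purely illustrative check that the dependency resolves:

    # Illustrative check that pandas can write .xlsx via the newly pinned openpyxl.
    import pandas as pd

    pd.DataFrame({"class": ["person"], "confidence": [0.92]}).to_excel(
        "Det_Report.xlsx", sheet_name="sheet1", index=False, engine="openpyxl")
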