rafaelpadilla committed on
Commit a5c4771 • Parent(s): 3ec873b

include FPS and license columns + text with FPS and hardware info

Files changed (3):
  1. app.py +22 -6
  2. constants.py +7 -0
  3. init.py +2 -0
app.py CHANGED

```diff
@@ -6,7 +6,8 @@ from init import is_model_on_hub, upload_file, load_all_info_from_dataset_hub
 from utils_display import AutoEvalColumn, fields, make_clickable_model, styled_error, styled_message
 from datetime import datetime, timezone
 
-LAST_UPDATED = "Aug 13th 2023"
+LAST_UPDATED = "September 7h 2023"
+GPU_MODEL = "NVIDIA Tesla M60"
 
 column_names = {"AP-IoU=0.50:0.95-area=all-maxDets=100": "AP",
                 "AP-IoU=0.50-area=all-maxDets=100": "AP@.50",
@@ -19,7 +20,10 @@ column_names = {"AP-IoU=0.50:0.95-area=all-maxDets=100": "AP",
                 "AR-IoU=0.50:0.95-area=all-maxDets=100": "AR100",
                 "AR-IoU=0.50:0.95-area=small-maxDets=100": "AR-S",
                 "AR-IoU=0.50:0.95-area=medium-maxDets=100": "AR-M",
-                "AR-IoU=0.50:0.95-area=large-maxDets=100": "AR-L"}
+                "AR-IoU=0.50:0.95-area=large-maxDets=100": "AR-L",
+                "estimated_fps": "FPS(*)",
+                "hub_license": "hub license",
+                }
 
 eval_queue_repo, requested_models, csv_results = load_all_info_from_dataset_hub()
 
@@ -30,18 +34,29 @@ if not csv_results.exists():
 original_df = pd.read_csv(csv_results)
 
 # Formats the columns
-def formatter(x):
+def decimal_formatter(x):
+    x = "{:.2f}".format(x)
+    return x
+
+def perc_formatter(x):
     x = "{:.2%}".format(x)
     while len(x) < 6:
         x = f"0{x}"
     return x
 
+# Drop columns not specified in dictionary
+cols_to_drop = [col for col in original_df.columns if col not in column_names]
+original_df.drop(cols_to_drop, axis=1, inplace=True)
+
 for col in original_df.columns:
     if col == "model":
         original_df[col] = original_df[col].apply(lambda x: x.replace(x, make_clickable_model(x)))
+    elif col == "estimated_fps":
+        original_df[col] = original_df[col].apply(decimal_formatter)  # For decimal values
+    elif col == "hub_license":
+        continue
     else:
-        original_df[col] = original_df[col].apply(formatter)  # For % values
-        # original_df[col] = original_df[col].multiply(100).round(2)
+        original_df[col] = original_df[col].apply(perc_formatter)  # For % values
 
 original_df.rename(columns=column_names, inplace=True)
 
@@ -133,8 +148,9 @@ with gr.Blocks() as demo:
                      [model_name_textbox, chb_coco2017],
                      mdw_submission_result)
 
+    gr.Markdown(f"(*) FPS was measured using *{GPU_MODEL}* processing 1 image per batch. Refer to the 📈 \"Metrics\" tab for further details.", elem_classes="markdown-text")
     gr.Markdown(f"Last updated on **{LAST_UPDATED}**", elem_classes="markdown-text")
-
+
     with gr.Row():
         with gr.Accordion("📙 Citation", open=False):
             gr.Textbox(
```
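For reference, here is what the two new formatters produce on their own. This is a standalone sketch with made-up sample values, reusing the function bodies from the diff above:

```python
# Standalone sketch of the formatters added above; sample values are illustrative.
def decimal_formatter(x):
    return "{:.2f}".format(x)      # fixed two-decimal string, used for FPS(*)

def perc_formatter(x):
    x = "{:.2%}".format(x)         # 0.456 -> "45.60%"
    while len(x) < 6:              # left-pad short values to width 6
        x = f"0{x}"                # "5.00%" -> "05.00%"
    return x

print(decimal_formatter(24.3567))  # 24.36
print(perc_formatter(0.456))       # 45.60%
print(perc_formatter(0.05))        # 05.00%
```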
constants.py CHANGED

```diff
@@ -58,6 +58,13 @@ There are plenty of variations of these metrics, depending on the IoU threshold,
 - **AR-M (ARmedium)**: AR for medium objects: 32² < area < 96².
 - **AR-L (ARlarge)**: AR for large objects: area > 96².
 
+## Frames Per Second (FPS)
+We measure each model's frames per second (FPS) from the average time it takes to process an image across the whole dataset, including the pre- and post-processing steps.
+
+The hardware used plays a large role in these numbers; the results table shows which hardware was used. 📈
+
+Because each model has its own batch-size and memory requirements, we test them all with just 1 image per batch. Keep in mind that this setup may not fully reflect real-world scenarios, where images are typically processed in larger batches for higher throughput. 🚀
+
 ## How to reproduce our results
 
 To compute these metrics, various tools employ different methods. For this leaderboard's evaluation, we utilize the COCO evaluation approach, which can be found in the [COCO evaluation toolkit](https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/cocoeval.py).
```
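The new FPS section above describes the measurement in words only; the sketch below shows one way such a number could be computed. The timing loop and the `preprocess`/`postprocess` helpers are illustrative assumptions, not the leaderboard's actual benchmarking code:

```python
import time

def estimate_fps(model, images, preprocess, postprocess):
    """Illustrative only: end-to-end throughput with 1 image per batch,
    timing pre-processing, inference, and post-processing together."""
    start = time.perf_counter()
    for image in images:            # batch size 1, as described above
        inputs = preprocess(image)  # pre-processing counts toward the time...
        outputs = model(inputs)
        postprocess(outputs)        # ...and so does post-processing
    elapsed = time.perf_counter() - start
    return len(images) / elapsed    # FPS = images processed per second
```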
init.py CHANGED

```diff
@@ -2,7 +2,9 @@ import os
 from constants import EVAL_REQUESTS_PATH
 from pathlib import Path
 from huggingface_hub import HfApi, Repository
+from dotenv import load_dotenv
 
+load_dotenv()
 TOKEN_HUB = os.environ.get("TOKEN_HUB", None)
 QUEUE_REPO = os.environ.get("QUEUE_REPO", None)
 QUEUE_PATH = os.environ.get("QUEUE_PATH", None)
```
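With `load_dotenv()` in place, the three variables above can also be supplied from a local `.env` file instead of the process environment. A hypothetical example with placeholder values (python-dotenv loads entries like these into `os.environ`, without overriding variables that are already set):

```
# .env -- placeholder values, not real credentials or repo names
TOKEN_HUB=hf_xxxxxxxxxxxxxxxxxxxx
QUEUE_REPO=<user>/<requests-dataset>
QUEUE_PATH=./eval-queue
```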