Nadine Rueegg committed • commit 4c1801d
1 parent: 432392d

adjust emojis
Files changed:
- README.md (+1 -1)
- gradio_demo/barc_demo_v6.py (+3 -1)
README.md CHANGED

@@ -1,6 +1,6 @@
 ---
 title: BARC
-emoji:
+emoji: 🐩 🐶 🐕
 colorFrom: pink
 colorTo: green
 sdk: gradio
gradio_demo/barc_demo_v6.py CHANGED

@@ -37,7 +37,7 @@ print(
     "\ntorchvision: ", torchvision.__version__,
 )
 
-
+global total_count = 0
 
 def get_prediction(model, img_path_or_img, confidence=0.5):
     """
@@ -193,6 +193,8 @@ def run_barc_inference(input_image, bbox=None):
 
 
 def run_complete_inference(img_path_or_img, crop_choice):
+    total_count += 1
+    print('total count: ' + str(total_count))
     # depending on crop_choice: run faster r-cnn or take the input image directly
     if crop_choice == "input image is cropped":
         if isinstance(img_path_or_img, str):