Spaces: Running on T4
AAAAAAyq committed
Commit • 7f6d4e3
1 Parent(s): c2bd1fd

Fix the OOM from the useful suggestion by hysts

- app.py +8 -2
- app_debug.py +8 -1
- gradio_cached_examples/16/log.csv +2 -0
- gradio_cached_examples/16/output/tmps67a9kx5.png +0 -0
app.py CHANGED
@@ -159,13 +159,19 @@ device = 'cuda' if torch.cuda.is_available() else 'cpu'
 
 def predict(input, input_size=512, high_visual_quality=True):
     input_size = int(input_size)  # ensure imgsz is an integer
+
+    # Thanks for the suggestion by hysts in HuggingFace.
+    w, h = input.size
+    scale = input_size / max(w, h)
+    new_w = int(w * scale)
+    new_h = int(h * scale)
+    input = input.resize((new_w, new_h))
+
     results = model(input, device=device, retina_masks=True, iou=0.7, conf=0.25, imgsz=input_size)
     fig = fast_process(annotations=results[0].masks.data,
                        image=input, high_quality=high_visual_quality, device=device)
     return fig
 
-
-
 # input_size=1024
 # high_quality_visual=True
 # inp = 'assets/sa_192.jpg'
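The patch above caps the longest side of the input image at input_size before inference, which is what resolves the OOM on the T4. For reuse outside this Space, here is a minimal standalone sketch of the same resize step, assuming the input is a PIL.Image as Gradio provides it; the helper name resize_to_fit and the example path are ours, not part of the repository.

from PIL import Image

def resize_to_fit(image: Image.Image, input_size: int = 512) -> Image.Image:
    # Hypothetical helper mirroring the logic added in this commit:
    # scale so the longest side equals input_size, preserving aspect ratio.
    w, h = image.size
    scale = input_size / max(w, h)
    return image.resize((int(w * scale), int(h * scale)))

# Usage: shrink a large photo before handing it to the model.
img = Image.open('assets/sa_192.jpg')   # path borrowed from the commented example in app.py
print(img.size, '->', resize_to_fit(img, input_size=512).size)

Note that, like the committed code, this sketch also upscales images whose longest side is already below input_size; guarding the resize with `if scale < 1:` would make it downscale-only.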
app_debug.py CHANGED
@@ -157,7 +157,14 @@ def fast_show_mask_gpu(annotation, ax,
 
 device = 'cuda' if torch.cuda.is_available() else 'cpu'
 
-def predict(input, input_size=512, high_visual_quality=
+def predict(input, input_size=512, high_visual_quality=True):
+    # Thanks for the suggestion by hysts in HuggingFace.
+    w, h = input.size
+    scale = input_size / max(w, h)
+    new_w = int(w * scale)
+    new_h = int(h * scale)
+    input = input.resize((new_w, new_h))
+
     input_size = int(input_size)  # ensure imgsz is an integer
     results = model(input, device=device, retina_masks=True, iou=0.7, conf=0.25, imgsz=input_size)
     fig = fast_process(annotations=results[0].masks.data,
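app_debug.py receives the same change. As a rough check of why the cap helps, applying the patch's formula to a hypothetical 4032×3024 photo (an assumed example resolution, not from the repository):

w, h, input_size = 4032, 3024, 512              # assumed example photo size
scale = input_size / max(w, h)                  # 512 / 4032 ≈ 0.127
new_w, new_h = int(w * scale), int(h * scale)   # 512 x 384
print(round((w * h) / (new_w * new_h)))         # ~62x fewer pixels reach the model

Since mask tensors grow with image resolution (especially with retina_masks=True), this order-of-magnitude reduction is plausibly what keeps the T4 within memory.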
gradio_cached_examples/16/log.csv ADDED
@@ -0,0 +1,2 @@
+output,flag,username,timestamp
+/data1/10cls/duyinglong/sam/ultralytics/ultralytics/yolo/v8/segment/demo/FastSAM/gradio_cached_examples/16/output/tmps67a9kx5.png,,,2023-06-22 16:13:18.129722
gradio_cached_examples/16/output/tmps67a9kx5.png ADDED